/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
        return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
        nvlist_t *pf_schema, *vf_schema;

        pf_schema = pci_iov_schema_alloc_node();
        vf_schema = pci_iov_schema_alloc_node();
        pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
        pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
            IOV_SCHEMA_HASDEFAULT, TRUE);
        pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
            IOV_SCHEMA_HASDEFAULT, FALSE);
        pci_iov_schema_add_bool(vf_schema, "allow-promisc",
            IOV_SCHEMA_HASDEFAULT, FALSE);
        *error = pci_iov_attach(dev, pf_schema, vf_schema);
        if (*error != 0) {
                device_printf(dev,
                    "Error %d setting up SR-IOV\n", *error);
        }
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
        int i;
        int index;

        for (i = 0; i < adapter->num_queues; i++) {
                index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
                adapter->rx_rings[i].me = index;
                adapter->tx_rings[i].me = index;
        }
}

/* Support functions for SR-IOV/VF management */
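/*
 * Post a single message to a VF's mailbox.  The CTS (clear to send) bit is
 * added automatically once the VF has completed the reset handshake, i.e.
 * when IXGBE_VF_CTS is set in its flags.
 */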
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
        if (vf->flags & IXGBE_VF_CTS)
                msg |= IXGBE_VT_MSGTYPE_CTS;

        adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
        msg &= IXGBE_VT_MSG_MASK;
        ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
        msg &= IXGBE_VT_MSG_MASK;
        ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
        if (!(vf->flags & IXGBE_VF_CTS))
                ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
        return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

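/*
 * Queues available to each VF in a given VMDq/IOV mode: with 64 pools each
 * VF gets two queues, with 32 pools it gets four, and with SR-IOV disabled
 * none are reserved.
 */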
static inline int
ixgbe_vf_queues(int mode)
{
        switch (mode) {
        case IXGBE_64_VM:
                return (2);
        case IXGBE_32_VM:
                return (4);
        case IXGBE_NO_VM:
        default:
                return (0);
        }
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
        return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
        if (adapter->max_frame_size < max_frame)
                adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
        u32 mrqc;

        switch (iov_mode) {
        case IXGBE_64_VM:
                mrqc = IXGBE_MRQC_VMDQRSS64EN;
                break;
        case IXGBE_32_VM:
                mrqc = IXGBE_MRQC_VMDQRSS32EN;
                break;
        case IXGBE_NO_VM:
                mrqc = 0;
                break;
        default:
                panic("Unexpected SR-IOV mode %d", iov_mode);
        }

        return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
        uint32_t mtqc;

        switch (iov_mode) {
        case IXGBE_64_VM:
                mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
                break;
        case IXGBE_32_VM:
                mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
                break;
        case IXGBE_NO_VM:
                mtqc = IXGBE_MTQC_64Q_1PB;
                break;
        default:
                panic("Unexpected SR-IOV mode %d", iov_mode);
        }

        return mtqc;
}

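/* Send a PF control ("ping") message to every active VF. */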
void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
        struct ixgbe_vf *vf;

        for (int i = 0; i < adapter->num_vfs; i++) {
                vf = &adapter->vfs[i];
                if (vf->flags & IXGBE_VF_ACTIVE)
                        ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
        }
} /* ixgbe_ping_all_vfs */

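/*
 * Program the pool's VMOLR and VMVIR for the given default VLAN: drop
 * packets that only match inexact filters, turn off multicast promiscuous
 * mode, accept broadcasts, and either pass untagged traffic through
 * (tag 0) or insert the supplied tag on everything the VF transmits.
 */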
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
        struct ixgbe_hw *hw;
        uint32_t vmolr, vmvir;

        hw = &adapter->hw;

        vf->vlan_tag = tag;

        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

        /* Do not receive packets that pass inexact filters. */
        vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

        /* Disable Multicast Promiscuous Mode. */
        vmolr &= ~IXGBE_VMOLR_MPE;

        /* Accept broadcasts. */
        vmolr |= IXGBE_VMOLR_BAM;

        if (tag == 0) {
                /* Accept non-vlan tagged traffic. */
                vmolr |= IXGBE_VMOLR_AUPE;

                /* Allow VM to tag outgoing traffic; no default tag. */
                vmvir = 0;
        } else {
                /* Require vlan-tagged traffic. */
                vmolr &= ~IXGBE_VMOLR_AUPE;

                /* Tag all traffic with provided vlan tag. */
                vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
        }
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

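/*
 * Zero the VF's mailbox memory so no stale message from a previous VF
 * instance survives a reset.
 */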
static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
        uint16_t mbx_size = hw->mbx.size;
        uint16_t i;

        IXGBE_CORE_LOCK_ASSERT(adapter);

        for (i = 0; i < mbx_size; ++i)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */

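/*
 * Enable and then immediately disable each of the VF's Tx queues.  The
 * per-VF queue count is taken from the VT mode in GCR_EXT; the toggle
 * presumably flushes any Tx queue state left behind by the previous VF
 * instance.
 */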
static void
ixgbe_toggle_txdctl(struct adapter *adapter, struct ixgbe_vf *vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        uint32_t vf_index, offset, reg;
        uint8_t queue_count, i;

        IXGBE_CORE_LOCK_ASSERT(adapter);

        vf_index = IXGBE_VF_INDEX(vf->pool);

        /* Determine number of queues by checking
         * number of virtual functions */
        reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
        switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
        case IXGBE_GCR_EXT_VT_MODE_64:
                queue_count = 2;
                break;
        case IXGBE_GCR_EXT_VT_MODE_32:
                queue_count = 4;
                break;
        default:
                return;
        }

        /* Toggle queues */
        for (i = 0; i < queue_count; ++i) {
                /* Calculate offset of current queue */
                offset = queue_count * vf_index + i;

                /* Enable queue */
                reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
                reg |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
                IXGBE_WRITE_FLUSH(hw);

                /* Disable queue */
                reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
                reg &= ~IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
                IXGBE_WRITE_FLUSH(hw);
        }
} /* ixgbe_toggle_txdctl */

static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

        /*
         * Frame size compatibility between PF and VF is only a problem on
         * 82599-based cards.  X540 and later support any combination of jumbo
         * frames on PFs and VFs.
         */
        if (adapter->hw.mac.type != ixgbe_mac_82599EB)
                return (TRUE);

        switch (vf->api_ver) {
        case IXGBE_API_VER_1_0:
        case IXGBE_API_VER_UNKNOWN:
                /*
                 * On legacy (1.0 and older) VF versions, we don't support jumbo
                 * frames on either the PF or the VF.
                 */
                if (adapter->max_frame_size > ETHER_MAX_LEN ||
                    vf->max_frame_size > ETHER_MAX_LEN)
                        return (FALSE);

                return (TRUE);
        case IXGBE_API_VER_1_1:
        default:
                /*
                 * 1.1 or later VF versions always work if they aren't using
                 * jumbo frames.
                 */
                if (vf->max_frame_size <= ETHER_MAX_LEN)
                        return (TRUE);

                /*
                 * Jumbo frames only work with VFs if the PF is also using jumbo
                 * frames.
                 */
                if (adapter->max_frame_size <= ETHER_MAX_LEN)
                        return (TRUE);

                return (FALSE);
        }
} /* ixgbe_vf_frame_size_compatible */

static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
        ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

        // XXX clear multicast addresses

        ixgbe_clear_rar(&adapter->hw, vf->rar_index);
        ixgbe_clear_vfmbmem(adapter, vf);
        ixgbe_toggle_txdctl(adapter, vf);

        vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */

static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
        struct ixgbe_hw *hw;
        uint32_t vf_index, vfte;

        hw = &adapter->hw;

        vf_index = IXGBE_VF_INDEX(vf->pool);
        vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
        vfte |= IXGBE_VF_BIT(vf->pool);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
        struct ixgbe_hw *hw;
        uint32_t vf_index, vfre;

        hw = &adapter->hw;

        vf_index = IXGBE_VF_INDEX(vf->pool);
        vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
        if (ixgbe_vf_frame_size_compatible(adapter, vf))
                vfre |= IXGBE_VF_BIT(vf->pool);
        else
                vfre &= ~IXGBE_VF_BIT(vf->pool);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */

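/*
 * Handle a VF_RESET mailbox message: reset the VF's state, reprogram its
 * receive address register if it already has a valid MAC address,
 * re-enable Tx/Rx for the pool, and reply with the permanent MAC address
 * and the PF's multicast filter type.
 */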
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
        struct ixgbe_hw *hw;
        uint32_t ack;
        uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

        hw = &adapter->hw;

        ixgbe_process_vf_reset(adapter, vf);

        if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
                ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
                    vf->pool, TRUE);
                ack = IXGBE_VT_MSGTYPE_ACK;
        } else
                ack = IXGBE_VT_MSGTYPE_NACK;

        ixgbe_vf_enable_transmit(adapter, vf);
        ixgbe_vf_enable_receive(adapter, vf);

        vf->flags |= IXGBE_VF_CTS;

        resp[0] = IXGBE_VF_RESET | ack;
        bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
        resp[3] = hw->mac.mc_filter_type;
        hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
        uint8_t *mac;

        mac = (uint8_t*)&msg[1];

        /* Check that the VF has permission to change the MAC address. */
        if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
                return;
        }

        if (ixgbe_validate_mac_addr(mac) != 0) {
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
                return;
        }

        bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

        ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
            TRUE);

        ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by using the appropriate bit in
 * 1 of 128 32 bit addresses (4096 possible).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
        u16 *list = (u16*)&msg[1];
        int entries;
        u32 vmolr, vec_bit, vec_reg, mta_reg;

        entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
        entries = uimin(entries, IXGBE_MAX_VF_MC);

        vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

        vf->num_mc_hashes = entries;

        /* Set the appropriate MTA bit */
        for (int i = 0; i < entries; i++) {
                vf->mc_hash[i] = list[i];
                vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
                vec_bit = vf->mc_hash[i] & 0x1F;
                mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
                mta_reg |= (1 << vec_bit);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
        }

        vmolr |= IXGBE_VMOLR_ROMPE;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
        ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
        struct ixgbe_hw *hw;
        int enable;
        uint16_t tag;

        hw = &adapter->hw;
        enable = IXGBE_VT_MSGINFO(msg[0]);
        tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

        if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
                return;
        }

        /* It is illegal to enable vlan tag 0. */
        if (tag == 0 && enable != 0) {
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
                return;
        }

        ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
        ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */

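/*
 * Handle a SET_LPE (max frame size) request.  Out-of-range requests are
 * deliberately ACKed without changing state; otherwise record the VF's
 * frame size, re-evaluate whether its receive path can stay enabled, and
 * grow the PF's MHADD.MFS field if needed.
 */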
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
        struct ixgbe_hw *hw;
        uint32_t vf_max_size, pf_max_size, mhadd;

        hw = &adapter->hw;
        vf_max_size = msg[1];

        if (vf_max_size < ETHER_CRC_LEN) {
                /* We intentionally ACK invalid LPE requests. */
                ixgbe_send_vf_ack(adapter, vf, msg[0]);
                return;
        }

        vf_max_size -= ETHER_CRC_LEN;

        if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
                /* We intentionally ACK invalid LPE requests. */
                ixgbe_send_vf_ack(adapter, vf, msg[0]);
                return;
        }

        vf->max_frame_size = vf_max_size;
        ixgbe_update_max_frame(adapter, vf->max_frame_size);

        /*
         * We might have to disable reception to this VF if the frame size is
         * not compatible with the config on the PF.
         */
        ixgbe_vf_enable_receive(adapter, vf);

        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

        if (pf_max_size < adapter->max_frame_size) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
                mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
        }

        ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
        //XXX implement this
        ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

        switch (msg[1]) {
        case IXGBE_API_VER_1_0:
        case IXGBE_API_VER_1_1:
                vf->api_ver = msg[1];
                ixgbe_send_vf_ack(adapter, vf, msg[0]);
                break;
        default:
                vf->api_ver = IXGBE_API_VER_UNKNOWN;
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
                break;
        }
} /* ixgbe_vf_api_negotiate */

static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
        struct ixgbe_hw *hw;
        uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
        int num_queues;

        hw = &adapter->hw;

        /* GET_QUEUES is not supported on pre-1.1 APIs. */
        switch (vf->api_ver) {
        case IXGBE_API_VER_1_0:
        case IXGBE_API_VER_UNKNOWN:
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
                return;
        }

        resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
            IXGBE_VT_MSGTYPE_CTS;

        num_queues = ixgbe_vf_queues(adapter->iov_mode);
        resp[IXGBE_VF_TX_QUEUES] = num_queues;
        resp[IXGBE_VF_RX_QUEUES] = num_queues;
        resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
        resp[IXGBE_VF_DEF_QUEUE] = 0;

        hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */

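/*
 * Read the pending mailbox message from a VF and dispatch it.  VF_RESET is
 * always honored; everything else is NACKed until the VF has completed the
 * reset handshake (CTS), after which the message type selects the handler.
 */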
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
        struct ixgbe_hw *hw;
        uint32_t msg[IXGBE_VFMAILBOX_SIZE];
        int error;

        hw = &adapter->hw;

        error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

        if (error != 0)
                return;

        CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
            msg[0], vf->pool);
        if (msg[0] == IXGBE_VF_RESET) {
                ixgbe_vf_reset_msg(adapter, vf, msg);
                return;
        }

        if (!(vf->flags & IXGBE_VF_CTS)) {
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
                return;
        }

        switch (msg[0] & IXGBE_VT_MSG_MASK) {
        case IXGBE_VF_SET_MAC_ADDR:
                ixgbe_vf_set_mac(adapter, vf, msg);
                break;
        case IXGBE_VF_SET_MULTICAST:
                ixgbe_vf_set_mc_addr(adapter, vf, msg);
                break;
        case IXGBE_VF_SET_VLAN:
                ixgbe_vf_set_vlan(adapter, vf, msg);
                break;
        case IXGBE_VF_SET_LPE:
                ixgbe_vf_set_lpe(adapter, vf, msg);
                break;
        case IXGBE_VF_SET_MACVLAN:
                ixgbe_vf_set_macvlan(adapter, vf, msg);
                break;
        case IXGBE_VF_API_NEGOTIATE:
                ixgbe_vf_api_negotiate(adapter, vf, msg);
                break;
        case IXGBE_VF_GET_QUEUES:
                ixgbe_vf_get_queues(adapter, vf, msg);
                break;
        default:
                ixgbe_send_vf_nack(adapter, vf, msg[0]);
        }
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
        struct adapter *adapter = context;
        struct ixgbe_hw *hw;
        struct ixgbe_vf *vf;
        int i;

        hw = &adapter->hw;

        IXGBE_CORE_LOCK(adapter);
        for (i = 0; i < adapter->num_vfs; i++) {
                vf = &adapter->vfs[i];

                if (vf->flags & IXGBE_VF_ACTIVE) {
                        if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
                                ixgbe_process_vf_reset(adapter, vf);

                        if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
                                ixgbe_process_vf_msg(adapter, vf);

                        if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
                                ixgbe_process_vf_ack(adapter, vf);
                }
        }
        IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

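/*
 * Entry point for creating VFs.  Choose 32- or 64-pool VMDq mode from the
 * requested VF count (the last pool is reserved for the PF), allocate the
 * per-VF state array, and reinitialize the adapter with SR-IOV enabled.
 */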
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
        struct adapter *adapter;
        int retval = 0;

        adapter = device_get_softc(dev);
        adapter->iov_mode = IXGBE_NO_VM;

        if (num_vfs == 0) {
                /* Would we ever get num_vfs = 0? */
                retval = EINVAL;
                goto err_init_iov;
        }

        /*
         * We've got to reserve a VM's worth of queues for the PF,
         * thus we go into "64 VF mode" if 32+ VFs are requested.
         * With 64 VFs, you can only have two queues per VF.
         * With 32 VFs, you can have up to four queues per VF.
         */
        if (num_vfs >= IXGBE_32_VM)
                adapter->iov_mode = IXGBE_64_VM;
        else
                adapter->iov_mode = IXGBE_32_VM;

        /* Again, reserving 1 VM's worth of queues for the PF */
        adapter->pool = adapter->iov_mode - 1;

        if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
                retval = ENOSPC;
                goto err_init_iov;
        }

        IXGBE_CORE_LOCK(adapter);

        adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
            M_NOWAIT | M_ZERO);

        if (adapter->vfs == NULL) {
                retval = ENOMEM;
                IXGBE_CORE_UNLOCK(adapter);
                goto err_init_iov;
        }

        adapter->num_vfs = num_vfs;

        /* set the SRIOV flag now as it's needed
         * by ixgbe_init_locked() */
        adapter->feat_en |= IXGBE_FEATURE_SRIOV;
        adapter->init_locked(adapter);

        IXGBE_CORE_UNLOCK(adapter);

        return (retval);

err_init_iov:
        adapter->num_vfs = 0;
        adapter->pool = 0;
        adapter->iov_mode = IXGBE_NO_VM;

        return (retval);
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
        struct ixgbe_hw *hw;
        struct adapter *adapter;
        uint32_t pf_reg, vf_reg;

        adapter = device_get_softc(dev);
        hw = &adapter->hw;

        IXGBE_CORE_LOCK(adapter);

        /* Enable rx/tx for the PF and disable it for all VFs. */
        pf_reg = IXGBE_VF_INDEX(adapter->pool);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

        if (pf_reg == 0)
                vf_reg = 1;
        else
                vf_reg = 0;
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

        free(adapter->vfs, M_IXGBE_SRIOV);
        adapter->vfs = NULL;
        adapter->num_vfs = 0;
        adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

        IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

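/*
 * (Re)initialize hardware state for one active VF: unmask its mailbox
 * interrupt, restore its default VLAN and MAC filter, enable Tx/Rx for the
 * pool, and notify the VF with a PF control message.
 */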
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
        struct ixgbe_hw *hw;
        uint32_t vf_index, pfmbimr;

        IXGBE_CORE_LOCK_ASSERT(adapter);

        hw = &adapter->hw;

        if (!(vf->flags & IXGBE_VF_ACTIVE))
                return;

        vf_index = IXGBE_VF_INDEX(vf->pool);
        pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
        pfmbimr |= IXGBE_VF_BIT(vf->pool);
        IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

        ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

        // XXX multicast addresses

        if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
                ixgbe_set_rar(&adapter->hw, vf->rar_index,
                    vf->ether_addr, vf->pool, TRUE);
        }

        ixgbe_vf_enable_transmit(adapter, vf);
        ixgbe_vf_enable_receive(adapter, vf);

        ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
        int i;

        if (adapter->iov_mode == IXGBE_NO_VM)
                return;

        IXGBE_CORE_LOCK_ASSERT(adapter);

        /* RMW appropriate registers based on IOV mode */
        /* Read... */
        mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
        gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        /* Modify... */
        mrqc &= ~IXGBE_MRQC_MRQE_MASK;
        mtqc = IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
        gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
        gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
        switch (adapter->iov_mode) {
        case IXGBE_64_VM:
                mrqc |= IXGBE_MRQC_VMDQRSS64EN;
                mtqc |= IXGBE_MTQC_64VF;
                gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
                gpie |= IXGBE_GPIE_VTMODE_64;
                break;
        case IXGBE_32_VM:
                mrqc |= IXGBE_MRQC_VMDQRSS32EN;
                mtqc |= IXGBE_MTQC_32VF;
                gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
                gpie |= IXGBE_GPIE_VTMODE_32;
                break;
        default:
                panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
        }
        /* Write... */
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /* Enable rx/tx for the PF. */
        vf_reg = IXGBE_VF_INDEX(adapter->pool);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

        /* Allow VM-to-VM communication. */
        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

        vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
        vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

        for (i = 0; i < adapter->num_vfs; i++)
                ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */

/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
        struct ixgbe_vf *vf;

        IXGBE_CORE_LOCK_ASSERT(adapter);

        for (int i = 0; i < adapter->num_vfs; i++) {
                vf = &adapter->vfs[i];
                if (vf->flags & IXGBE_VF_ACTIVE)
                        ixgbe_update_max_frame(adapter, vf->max_frame_size);
        }
} /* ixgbe_recalculate_max_frame */

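/*
 * Per-VF configuration entry point: record the VF's pool and RAR index,
 * seed its maximum frame size, take an administratively assigned MAC
 * address from the nvlist config (or let the VF choose its own), and
 * bring the VF up.
 */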
int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
        struct adapter *adapter;
        struct ixgbe_vf *vf;
        const void *mac;

        adapter = device_get_softc(dev);

        KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
            vfnum, adapter->num_vfs));

        IXGBE_CORE_LOCK(adapter);
        vf = &adapter->vfs[vfnum];
        vf->pool = vfnum;

        /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
        vf->rar_index = vfnum + 1;
        vf->default_vlan = 0;
        vf->max_frame_size = ETHER_MAX_LEN;
        ixgbe_update_max_frame(adapter, vf->max_frame_size);

        if (nvlist_exists_binary(config, "mac-addr")) {
                mac = nvlist_get_binary(config, "mac-addr", NULL);
                bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
                if (nvlist_get_bool(config, "allow-set-mac"))
                        vf->flags |= IXGBE_VF_CAP_MAC;
        } else
                /*
                 * If the administrator has not specified a MAC address then
                 * we must allow the VF to choose one.
                 */
                vf->flags |= IXGBE_VF_CAP_MAC;

        vf->flags |= IXGBE_VF_ACTIVE;

        ixgbe_init_vf(adapter, vf);
        IXGBE_CORE_UNLOCK(adapter);

        return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context, int pending)
{
        UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
        UNREFERENCED_2PARAMETER(mode, vfnum);

        return num;
} /* ixgbe_vf_que_index */

#endif