/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
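
/*
 * The VF parameters registered above ("mac-addr", "mac-anti-spoof",
 * "allow-set-mac", "allow-promisc") are consumed by the host's SR-IOV
 * configuration tool rather than by this driver.  As a rough illustration
 * only (the device name and values below are hypothetical), a FreeBSD
 * iovctl.conf(5) fragment using this schema might look like:
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 4;
 *	}
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *		allow-promisc : false;
 *	}
 */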

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
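/*
 * ixgbe_send_vf_msg() writes a single 32-bit message into the given VF's
 * mailbox, setting the clear-to-send (CTS) bit once the VF has completed
 * its reset handshake.  The ack/nack variants echo the VF's request type
 * back with IXGBE_VT_MSGTYPE_ACK or IXGBE_VT_MSGTYPE_NACK set.
 */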
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline bool
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

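/*
 * Queues assigned to each VF pool: 64-pool mode gives two queues per pool,
 * 32-pool mode gives four, and with SR-IOV disabled VF pools get none.
 * ixgbe_vf_que_index() maps a (pool, local queue) pair to the absolute
 * hardware queue index.
 */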
static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

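/*
 * MRQC/MTQC values select the Rx and Tx queue-to-pool layout that matches
 * the requested virtualization mode: VMDq with RSS over 64 or 32 pools, or
 * the non-virtualized default when SR-IOV is off.
 */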
inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


static bool
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards. X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */

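/*
 * Reset the PF-side state of a VF: reapply its default VLAN, clear its
 * receive address (RAR) entry and mailbox memory, toggle its Tx queue
 * enables and force a new mailbox API negotiation.
 */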
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */

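/*
 * VFTE and VFRE are per-pool Tx/Rx enable bit arrays split across two
 * 32-bit registers; IXGBE_VF_INDEX() selects the register and
 * IXGBE_VF_BIT() the bit for a given pool.  Receive is only enabled when
 * the VF's frame size is compatible with the PF's configuration.
 */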
static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */

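/*
 * Handle a VF_RESET request.  The reply is the "permanent address"
 * message: word 0 carries IXGBE_VF_RESET plus ACK (or NACK when no valid
 * MAC address is assigned yet), the VF's MAC address follows, and the
 * last word holds the PF's multicast filter type.
 */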
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */

/*
 * VF multicast addresses are set by turning on the appropriate bit in
 * one of the 128 32-bit MTA registers (4096 possible hash values).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16 *list = (u16 *)&msg[1];
	int entries;
	u32 vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = uimin(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
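	/*
	 * Each 12-bit hash value selects one bit in the 128-entry MTA:
	 * the upper bits pick the 32-bit register and the low 5 bits the
	 * bit within it.
	 */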
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */

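/*
 * IXGBE_VF_SET_LPE: msg[1] carries the VF's requested maximum frame size,
 * including the CRC.  Out-of-range requests are intentionally ACKed but
 * otherwise ignored; accepted requests may raise the PF's MHADD.MFS limit
 * and can leave an incompatible VF with receive disabled.
 */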
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	hw = &adapter->hw;

	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
} /* ixgbe_handle_mbx */

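/*
 * Entry point for the PCI SR-IOV framework when VFs are being created.
 * Chooses 32- or 64-pool mode (one pool is always reserved for the PF),
 * allocates the per-VF state array and re-initializes the hardware with
 * SR-IOV enabled.
 */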
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;

	/* set the SRIOV flag now as it's needed by ixgbe_init_locked() */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */

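/*
 * Tear down SR-IOV: leave Rx/Tx enabled only for the PF's pool, clear the
 * remaining pool enables, disable the VT pool engine (VT_CTL) and release
 * the per-VF state.
 */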
void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

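/*
 * Bring a single VF online: unmask its mailbox interrupt, restore its
 * default VLAN and MAC filter, enable its Tx/Rx pools and ping it so a
 * VF driver that is already running notices the PF has (re)started.
 */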
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;	/* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */

/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif