1 /* $NetBSD: spectre.c,v 1.36 2021/10/07 12:52:27 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 2018-2019 NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Maxime Villard. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Mitigations for the SpectreV2, SpectreV4, MDS and TAA CPU flaws. 34 */ 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.36 2021/10/07 12:52:27 msaitoh Exp $"); 38 39 #include "opt_spectre.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/cpu.h> 44 #include <sys/sysctl.h> 45 #include <sys/xcall.h> 46 47 #include <machine/cpufunc.h> 48 #include <machine/cpuvar.h> 49 #include <machine/specialreg.h> 50 #include <machine/frameasm.h> 51 52 #include <x86/cputypes.h> 53 54 enum v2_mitigation { 55 V2_MITIGATION_NONE, 56 V2_MITIGATION_AMD_DIS_IND, 57 V2_MITIGATION_INTEL_IBRS, 58 V2_MITIGATION_INTEL_ENHANCED_IBRS 59 }; 60 61 enum v4_mitigation { 62 V4_MITIGATION_NONE, 63 V4_MITIGATION_INTEL_SSBD, 64 V4_MITIGATION_INTEL_SSB_NO, 65 V4_MITIGATION_AMD_SSB_NO, 66 V4_MITIGATION_AMD_NONARCH_F15H, 67 V4_MITIGATION_AMD_NONARCH_F16H, 68 V4_MITIGATION_AMD_NONARCH_F17H 69 }; 70 71 static enum v2_mitigation v2_mitigation_method = V2_MITIGATION_NONE; 72 static enum v4_mitigation v4_mitigation_method = V4_MITIGATION_NONE; 73 74 static bool v2_mitigation_enabled __read_mostly = false; 75 static bool v4_mitigation_enabled __read_mostly = false; 76 77 static char v2_mitigation_name[64] = "(none)"; 78 static char v4_mitigation_name[64] = "(none)"; 79 80 /* --------------------------------------------------------------------- */ 81 82 static void 83 v2_set_name(void) 84 { 85 char name[64] = ""; 86 size_t nmitig = 0; 87 88 #if defined(SPECTRE_V2_GCC_MITIGATION) 89 strlcat(name, "[GCC retpoline]", sizeof(name)); 90 nmitig++; 91 #endif 92 93 if (!v2_mitigation_enabled) { 94 if (nmitig == 0) 95 strlcat(name, "(none)", sizeof(name)); 96 } else { 97 if (nmitig) 98 strlcat(name, " + ", sizeof(name)); 99 switch (v2_mitigation_method) { 100 case V2_MITIGATION_AMD_DIS_IND: 101 strlcat(name, "[AMD DIS_IND]", sizeof(name)); 102 break; 103 case V2_MITIGATION_INTEL_IBRS: 104 strlcat(name, "[Intel IBRS]", sizeof(name)); 105 break; 106 case V2_MITIGATION_INTEL_ENHANCED_IBRS: 107 strlcat(name, "[Intel Enhanced IBRS]", sizeof(name)); 108 break; 109 default: 110 panic("%s: impossible", __func__); 111 } 112 } 113 114 strlcpy(v2_mitigation_name, name, 115 sizeof(v2_mitigation_name)); 116 } 117 118 static void 119 v2_detect_method(void) 120 { 121 struct cpu_info *ci = curcpu(); 122 u_int descs[4]; 123 uint64_t msr; 124 125 if (cpu_vendor == CPUVENDOR_INTEL) { 126 if (cpuid_level >= 7) { 127 x86_cpuid(7, descs); 128 129 if (descs[3] & CPUID_SEF_IBRS) { 130 if (descs[3] & CPUID_SEF_ARCH_CAP) { 131 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 132 if (msr & IA32_ARCH_IBRS_ALL) { 133 v2_mitigation_method = 134 V2_MITIGATION_INTEL_ENHANCED_IBRS; 135 return; 136 } 137 } 138 #ifdef __x86_64__ 139 v2_mitigation_method = V2_MITIGATION_INTEL_IBRS; 140 return; 141 #endif 142 } 143 } 144 v2_mitigation_method = V2_MITIGATION_NONE; 145 } else if (cpu_vendor == CPUVENDOR_AMD) { 146 /* 147 * The AMD Family 10h manual documents the IC_CFG.DIS_IND bit. 148 * This bit disables the Indirect Branch Predictor. 149 * 150 * Families 12h and 16h are believed to have this bit too, but 151 * their manuals don't document it. 152 */ 153 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 154 case 0x10: 155 v2_mitigation_method = V2_MITIGATION_AMD_DIS_IND; 156 break; 157 default: 158 v2_mitigation_method = V2_MITIGATION_NONE; 159 break; 160 } 161 } else { 162 v2_mitigation_method = V2_MITIGATION_NONE; 163 } 164 } 165 166 /* -------------------------------------------------------------------------- */ 167 168 static volatile unsigned long ibrs_cpu_barrier1 __cacheline_aligned; 169 static volatile unsigned long ibrs_cpu_barrier2 __cacheline_aligned; 170 171 #ifdef __x86_64__ 172 /* IBRS_ENTER. */ 173 extern uint8_t noibrs_enter, noibrs_enter_end; 174 extern uint8_t ibrs_enter, ibrs_enter_end; 175 static const struct x86_hotpatch_source hp_noibrs_enter_source = { 176 .saddr = &noibrs_enter, 177 .eaddr = &noibrs_enter_end 178 }; 179 static const struct x86_hotpatch_source hp_ibrs_enter_source = { 180 .saddr = &ibrs_enter, 181 .eaddr = &ibrs_enter_end 182 }; 183 static const struct x86_hotpatch_descriptor hp_ibrs_enter_desc = { 184 .name = HP_NAME_IBRS_ENTER, 185 .nsrc = 2, 186 .srcs = { &hp_noibrs_enter_source, &hp_ibrs_enter_source } 187 }; 188 __link_set_add_rodata(x86_hotpatch_descriptors, hp_ibrs_enter_desc); 189 190 /* IBRS_LEAVE. */ 191 extern uint8_t noibrs_leave, noibrs_leave_end; 192 extern uint8_t ibrs_leave, ibrs_leave_end; 193 static const struct x86_hotpatch_source hp_noibrs_leave_source = { 194 .saddr = &noibrs_leave, 195 .eaddr = &noibrs_leave_end 196 }; 197 static const struct x86_hotpatch_source hp_ibrs_leave_source = { 198 .saddr = &ibrs_leave, 199 .eaddr = &ibrs_leave_end 200 }; 201 static const struct x86_hotpatch_descriptor hp_ibrs_leave_desc = { 202 .name = HP_NAME_IBRS_LEAVE, 203 .nsrc = 2, 204 .srcs = { &hp_noibrs_leave_source, &hp_ibrs_leave_source } 205 }; 206 __link_set_add_rodata(x86_hotpatch_descriptors, hp_ibrs_leave_desc); 207 208 static void 209 ibrs_disable_hotpatch(void) 210 { 211 x86_hotpatch(HP_NAME_IBRS_ENTER, /* noibrs */ 0); 212 x86_hotpatch(HP_NAME_IBRS_LEAVE, /* noibrs */ 0); 213 } 214 215 static void 216 ibrs_enable_hotpatch(void) 217 { 218 x86_hotpatch(HP_NAME_IBRS_ENTER, /* ibrs */ 1); 219 x86_hotpatch(HP_NAME_IBRS_LEAVE, /* ibrs */ 1); 220 } 221 #else 222 /* IBRS not supported on i386 */ 223 static void 224 ibrs_disable_hotpatch(void) 225 { 226 panic("%s: impossible", __func__); 227 } 228 static void 229 ibrs_enable_hotpatch(void) 230 { 231 panic("%s: impossible", __func__); 232 } 233 #endif 234 235 /* -------------------------------------------------------------------------- */ 236 237 static void 238 mitigation_v2_apply_cpu(struct cpu_info *ci, bool enabled) 239 { 240 uint64_t msr; 241 242 switch (v2_mitigation_method) { 243 case V2_MITIGATION_NONE: 244 panic("impossible"); 245 case V2_MITIGATION_INTEL_IBRS: 246 /* cpu0 is the one that does the hotpatch job */ 247 if (ci == &cpu_info_primary) { 248 if (enabled) { 249 ibrs_enable_hotpatch(); 250 } else { 251 ibrs_disable_hotpatch(); 252 } 253 } 254 if (!enabled) { 255 wrmsr(MSR_IA32_SPEC_CTRL, 0); 256 } 257 break; 258 case V2_MITIGATION_INTEL_ENHANCED_IBRS: 259 msr = rdmsr(MSR_IA32_SPEC_CTRL); 260 if (enabled) { 261 msr |= IA32_SPEC_CTRL_IBRS; 262 } else { 263 msr &= ~IA32_SPEC_CTRL_IBRS; 264 } 265 wrmsr(MSR_IA32_SPEC_CTRL, msr); 266 break; 267 case V2_MITIGATION_AMD_DIS_IND: 268 msr = rdmsr(MSR_IC_CFG); 269 if (enabled) { 270 msr |= IC_CFG_DIS_IND; 271 } else { 272 msr &= ~IC_CFG_DIS_IND; 273 } 274 wrmsr(MSR_IC_CFG, msr); 275 break; 276 } 277 } 278 279 /* 280 * Note: IBRS requires hotpatching, so we need barriers. 281 */ 282 static void 283 mitigation_v2_change_cpu(void *arg1, void *arg2) 284 { 285 struct cpu_info *ci = curcpu(); 286 bool enabled = arg1 != NULL; 287 u_long psl = 0; 288 289 /* Rendez-vous 1 (IBRS only). */ 290 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) { 291 psl = x86_read_psl(); 292 x86_disable_intr(); 293 294 atomic_dec_ulong(&ibrs_cpu_barrier1); 295 while (atomic_cas_ulong(&ibrs_cpu_barrier1, 0, 0) != 0) { 296 x86_pause(); 297 } 298 } 299 300 mitigation_v2_apply_cpu(ci, enabled); 301 302 /* Rendez-vous 2 (IBRS only). */ 303 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) { 304 atomic_dec_ulong(&ibrs_cpu_barrier2); 305 while (atomic_cas_ulong(&ibrs_cpu_barrier2, 0, 0) != 0) { 306 x86_pause(); 307 } 308 309 /* Write back and invalidate cache, flush pipelines. */ 310 wbinvd(); 311 x86_flush(); 312 313 x86_write_psl(psl); 314 } 315 } 316 317 static int 318 mitigation_v2_change(bool enabled) 319 { 320 uint64_t xc; 321 322 v2_detect_method(); 323 324 switch (v2_mitigation_method) { 325 case V2_MITIGATION_NONE: 326 printf("[!] No mitigation available\n"); 327 return EOPNOTSUPP; 328 case V2_MITIGATION_AMD_DIS_IND: 329 case V2_MITIGATION_INTEL_IBRS: 330 case V2_MITIGATION_INTEL_ENHANCED_IBRS: 331 /* Initialize the barriers */ 332 ibrs_cpu_barrier1 = ncpu; 333 ibrs_cpu_barrier2 = ncpu; 334 335 printf("[+] %s SpectreV2 Mitigation...", 336 enabled ? "Enabling" : "Disabling"); 337 xc = xc_broadcast(XC_HIGHPRI, mitigation_v2_change_cpu, 338 (void *)enabled, NULL); 339 xc_wait(xc); 340 printf(" done!\n"); 341 v2_mitigation_enabled = enabled; 342 v2_set_name(); 343 return 0; 344 default: 345 panic("impossible"); 346 } 347 } 348 349 static int 350 sysctl_machdep_spectreV2_mitigated(SYSCTLFN_ARGS) 351 { 352 struct sysctlnode node; 353 int error; 354 bool val; 355 356 val = *(bool *)rnode->sysctl_data; 357 358 node = *rnode; 359 node.sysctl_data = &val; 360 361 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 362 if (error != 0 || newp == NULL) 363 return error; 364 365 if (val == v2_mitigation_enabled) 366 return 0; 367 return mitigation_v2_change(val); 368 } 369 370 /* -------------------------------------------------------------------------- */ 371 372 static void 373 v4_set_name(void) 374 { 375 char name[64] = ""; 376 377 if (!v4_mitigation_enabled) { 378 strlcat(name, "(none)", sizeof(name)); 379 } else { 380 switch (v4_mitigation_method) { 381 case V4_MITIGATION_NONE: 382 panic("%s: impossible", __func__); 383 case V4_MITIGATION_INTEL_SSBD: 384 strlcat(name, "[Intel SSBD]", sizeof(name)); 385 break; 386 case V4_MITIGATION_INTEL_SSB_NO: 387 strlcat(name, "[Intel SSB_NO]", sizeof(name)); 388 break; 389 case V4_MITIGATION_AMD_SSB_NO: 390 strlcat(name, "[AMD SSB_NO]", sizeof(name)); 391 break; 392 case V4_MITIGATION_AMD_NONARCH_F15H: 393 case V4_MITIGATION_AMD_NONARCH_F16H: 394 case V4_MITIGATION_AMD_NONARCH_F17H: 395 strlcat(name, "[AMD NONARCH]", sizeof(name)); 396 break; 397 } 398 } 399 400 strlcpy(v4_mitigation_name, name, 401 sizeof(v4_mitigation_name)); 402 } 403 404 static void 405 v4_detect_method(void) 406 { 407 struct cpu_info *ci = curcpu(); 408 u_int descs[4]; 409 uint64_t msr; 410 411 if (cpu_vendor == CPUVENDOR_INTEL) { 412 if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) { 413 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 414 if (msr & IA32_ARCH_SSB_NO) { 415 /* Not vulnerable to SpectreV4. */ 416 v4_mitigation_method = V4_MITIGATION_INTEL_SSB_NO; 417 return; 418 } 419 } 420 if (cpuid_level >= 7) { 421 x86_cpuid(7, descs); 422 if (descs[3] & CPUID_SEF_SSBD) { 423 /* descs[3] = %edx */ 424 v4_mitigation_method = V4_MITIGATION_INTEL_SSBD; 425 return; 426 } 427 } 428 } else if (cpu_vendor == CPUVENDOR_AMD) { 429 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 430 case 0x15: 431 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F15H; 432 return; 433 case 0x16: 434 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F16H; 435 return; 436 case 0x17: 437 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F17H; 438 return; 439 default: 440 if (cpu_info_primary.ci_max_ext_cpuid < 0x80000008) { 441 break; 442 } 443 x86_cpuid(0x80000008, descs); 444 if (descs[1] & CPUID_CAPEX_SSB_NO) { 445 /* Not vulnerable to SpectreV4. */ 446 v4_mitigation_method = V4_MITIGATION_AMD_SSB_NO; 447 return; 448 } 449 450 break; 451 } 452 } 453 454 v4_mitigation_method = V4_MITIGATION_NONE; 455 } 456 457 static void 458 mitigation_v4_apply_cpu(bool enabled) 459 { 460 uint64_t msr, msrval = 0, msrbit = 0; 461 462 switch (v4_mitigation_method) { 463 case V4_MITIGATION_NONE: 464 case V4_MITIGATION_INTEL_SSB_NO: 465 case V4_MITIGATION_AMD_SSB_NO: 466 panic("impossible"); 467 case V4_MITIGATION_INTEL_SSBD: 468 msrval = MSR_IA32_SPEC_CTRL; 469 msrbit = IA32_SPEC_CTRL_SSBD; 470 break; 471 case V4_MITIGATION_AMD_NONARCH_F15H: 472 msrval = MSR_LS_CFG; 473 msrbit = LS_CFG_DIS_SSB_F15H; 474 break; 475 case V4_MITIGATION_AMD_NONARCH_F16H: 476 msrval = MSR_LS_CFG; 477 msrbit = LS_CFG_DIS_SSB_F16H; 478 break; 479 case V4_MITIGATION_AMD_NONARCH_F17H: 480 msrval = MSR_LS_CFG; 481 msrbit = LS_CFG_DIS_SSB_F17H; 482 break; 483 } 484 485 msr = rdmsr(msrval); 486 if (enabled) { 487 msr |= msrbit; 488 } else { 489 msr &= ~msrbit; 490 } 491 wrmsr(msrval, msr); 492 } 493 494 static void 495 mitigation_v4_change_cpu(void *arg1, void *arg2) 496 { 497 bool enabled = arg1 != NULL; 498 499 mitigation_v4_apply_cpu(enabled); 500 } 501 502 static int 503 mitigation_v4_change(bool enabled) 504 { 505 uint64_t xc; 506 507 v4_detect_method(); 508 509 switch (v4_mitigation_method) { 510 case V4_MITIGATION_NONE: 511 printf("[!] No mitigation available\n"); 512 return EOPNOTSUPP; 513 case V4_MITIGATION_INTEL_SSBD: 514 case V4_MITIGATION_AMD_NONARCH_F15H: 515 case V4_MITIGATION_AMD_NONARCH_F16H: 516 case V4_MITIGATION_AMD_NONARCH_F17H: 517 printf("[+] %s SpectreV4 Mitigation...", 518 enabled ? "Enabling" : "Disabling"); 519 xc = xc_broadcast(0, mitigation_v4_change_cpu, 520 (void *)enabled, NULL); 521 xc_wait(xc); 522 printf(" done!\n"); 523 v4_mitigation_enabled = enabled; 524 v4_set_name(); 525 return 0; 526 case V4_MITIGATION_INTEL_SSB_NO: 527 case V4_MITIGATION_AMD_SSB_NO: 528 printf("[+] The CPU is not affected by SpectreV4\n"); 529 return 0; 530 default: 531 panic("impossible"); 532 } 533 } 534 535 static int 536 sysctl_machdep_spectreV4_mitigated(SYSCTLFN_ARGS) 537 { 538 struct sysctlnode node; 539 int error; 540 bool val; 541 542 val = *(bool *)rnode->sysctl_data; 543 544 node = *rnode; 545 node.sysctl_data = &val; 546 547 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 548 if (error != 0 || newp == NULL) 549 return error; 550 551 if (val == v4_mitigation_enabled) 552 return 0; 553 return mitigation_v4_change(val); 554 } 555 556 /* -------------------------------------------------------------------------- */ 557 558 enum mds_mitigation { 559 MDS_MITIGATION_NONE, 560 MDS_MITIGATION_VERW, 561 MDS_MITIGATION_MDS_NO 562 }; 563 564 static char mds_mitigation_name[64] = "(none)"; 565 566 static enum mds_mitigation mds_mitigation_method = MDS_MITIGATION_NONE; 567 static bool mds_mitigation_enabled __read_mostly = false; 568 569 static volatile unsigned long mds_cpu_barrier1 __cacheline_aligned; 570 static volatile unsigned long mds_cpu_barrier2 __cacheline_aligned; 571 572 #ifdef __x86_64__ 573 /* MDS_LEAVE. */ 574 extern uint8_t nomds_leave, nomds_leave_end; 575 extern uint8_t mds_leave, mds_leave_end; 576 static const struct x86_hotpatch_source hp_nomds_leave_source = { 577 .saddr = &nomds_leave, 578 .eaddr = &nomds_leave_end 579 }; 580 static const struct x86_hotpatch_source hp_mds_leave_source = { 581 .saddr = &mds_leave, 582 .eaddr = &mds_leave_end 583 }; 584 static const struct x86_hotpatch_descriptor hp_mds_leave_desc = { 585 .name = HP_NAME_MDS_LEAVE, 586 .nsrc = 2, 587 .srcs = { &hp_nomds_leave_source, &hp_mds_leave_source } 588 }; 589 __link_set_add_rodata(x86_hotpatch_descriptors, hp_mds_leave_desc); 590 591 static void 592 mds_disable_hotpatch(void) 593 { 594 x86_hotpatch(HP_NAME_MDS_LEAVE, /* nomds */ 0); 595 } 596 597 static void 598 mds_enable_hotpatch(void) 599 { 600 x86_hotpatch(HP_NAME_MDS_LEAVE, /* mds */ 1); 601 } 602 #else 603 /* MDS not supported on i386 */ 604 static void 605 mds_disable_hotpatch(void) 606 { 607 panic("%s: impossible", __func__); 608 } 609 static void 610 mds_enable_hotpatch(void) 611 { 612 panic("%s: impossible", __func__); 613 } 614 #endif 615 616 static void 617 mitigation_mds_apply_cpu(struct cpu_info *ci, bool enabled) 618 { 619 switch (mds_mitigation_method) { 620 case MDS_MITIGATION_NONE: 621 case MDS_MITIGATION_MDS_NO: 622 panic("impossible"); 623 case MDS_MITIGATION_VERW: 624 /* cpu0 is the one that does the hotpatch job */ 625 if (ci == &cpu_info_primary) { 626 if (enabled) { 627 mds_enable_hotpatch(); 628 } else { 629 mds_disable_hotpatch(); 630 } 631 } 632 break; 633 } 634 } 635 636 static void 637 mitigation_mds_change_cpu(void *arg1, void *arg2) 638 { 639 struct cpu_info *ci = curcpu(); 640 bool enabled = arg1 != NULL; 641 u_long psl = 0; 642 643 /* Rendez-vous 1. */ 644 psl = x86_read_psl(); 645 x86_disable_intr(); 646 647 atomic_dec_ulong(&mds_cpu_barrier1); 648 while (atomic_cas_ulong(&mds_cpu_barrier1, 0, 0) != 0) { 649 x86_pause(); 650 } 651 652 mitigation_mds_apply_cpu(ci, enabled); 653 654 /* Rendez-vous 2. */ 655 atomic_dec_ulong(&mds_cpu_barrier2); 656 while (atomic_cas_ulong(&mds_cpu_barrier2, 0, 0) != 0) { 657 x86_pause(); 658 } 659 660 /* Write back and invalidate cache, flush pipelines. */ 661 wbinvd(); 662 x86_flush(); 663 664 x86_write_psl(psl); 665 } 666 667 static void 668 mds_detect_method(void) 669 { 670 u_int descs[4]; 671 uint64_t msr; 672 673 if (cpu_vendor != CPUVENDOR_INTEL) { 674 mds_mitigation_method = MDS_MITIGATION_MDS_NO; 675 return; 676 } 677 678 if (cpuid_level < 7) { 679 return; 680 } 681 682 x86_cpuid(0x7, descs); 683 if (descs[3] & CPUID_SEF_ARCH_CAP) { 684 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 685 if (msr & IA32_ARCH_MDS_NO) { 686 mds_mitigation_method = MDS_MITIGATION_MDS_NO; 687 return; 688 } 689 } 690 691 #ifdef __x86_64__ 692 if (descs[3] & CPUID_SEF_MD_CLEAR) { 693 mds_mitigation_method = MDS_MITIGATION_VERW; 694 } 695 #endif 696 } 697 698 static void 699 mds_set_name(void) 700 { 701 char name[64] = ""; 702 703 if (!mds_mitigation_enabled) { 704 strlcat(name, "(none)", sizeof(name)); 705 } else { 706 switch (mds_mitigation_method) { 707 case MDS_MITIGATION_NONE: 708 panic("%s: impossible", __func__); 709 case MDS_MITIGATION_MDS_NO: 710 strlcat(name, "[MDS_NO]", sizeof(name)); 711 break; 712 case MDS_MITIGATION_VERW: 713 strlcat(name, "[VERW]", sizeof(name)); 714 break; 715 } 716 } 717 718 strlcpy(mds_mitigation_name, name, 719 sizeof(mds_mitigation_name)); 720 } 721 722 static int 723 mitigation_mds_change(bool enabled) 724 { 725 uint64_t xc; 726 727 mds_detect_method(); 728 729 switch (mds_mitigation_method) { 730 case MDS_MITIGATION_NONE: 731 printf("[!] No mitigation available\n"); 732 return EOPNOTSUPP; 733 case MDS_MITIGATION_VERW: 734 /* Initialize the barriers */ 735 mds_cpu_barrier1 = ncpu; 736 mds_cpu_barrier2 = ncpu; 737 738 printf("[+] %s MDS Mitigation...", 739 enabled ? "Enabling" : "Disabling"); 740 xc = xc_broadcast(XC_HIGHPRI, mitigation_mds_change_cpu, 741 (void *)enabled, NULL); 742 xc_wait(xc); 743 printf(" done!\n"); 744 mds_mitigation_enabled = enabled; 745 mds_set_name(); 746 return 0; 747 case MDS_MITIGATION_MDS_NO: 748 printf("[+] The CPU is not affected by MDS\n"); 749 return 0; 750 default: 751 panic("impossible"); 752 } 753 } 754 755 static int 756 sysctl_machdep_mds_mitigated(SYSCTLFN_ARGS) 757 { 758 struct sysctlnode node; 759 int error; 760 bool val; 761 762 val = *(bool *)rnode->sysctl_data; 763 764 node = *rnode; 765 node.sysctl_data = &val; 766 767 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 768 if (error != 0 || newp == NULL) 769 return error; 770 771 if (val == mds_mitigation_enabled) 772 return 0; 773 return mitigation_mds_change(val); 774 } 775 776 /* -------------------------------------------------------------------------- */ 777 778 enum taa_mitigation { 779 TAA_MITIGATION_NONE, 780 TAA_MITIGATION_TAA_NO, 781 TAA_MITIGATION_MDS, 782 TAA_MITIGATION_RTM_DISABLE 783 }; 784 785 static char taa_mitigation_name[64] = "(none)"; 786 787 static enum taa_mitigation taa_mitigation_method = TAA_MITIGATION_NONE; 788 static bool taa_mitigation_enabled __read_mostly = false; 789 static bool *taa_mitigation_enabled_ptr = &taa_mitigation_enabled; 790 791 static void 792 mitigation_taa_apply_cpu(struct cpu_info *ci, bool enabled) 793 { 794 uint64_t msr; 795 796 switch (taa_mitigation_method) { 797 case TAA_MITIGATION_NONE: 798 case TAA_MITIGATION_TAA_NO: 799 case TAA_MITIGATION_MDS: 800 panic("impossible"); 801 case TAA_MITIGATION_RTM_DISABLE: 802 msr = rdmsr(MSR_IA32_TSX_CTRL); 803 if (enabled) { 804 msr |= IA32_TSX_CTRL_RTM_DISABLE; 805 } else { 806 msr &= ~IA32_TSX_CTRL_RTM_DISABLE; 807 } 808 wrmsr(MSR_IA32_TSX_CTRL, msr); 809 break; 810 } 811 } 812 813 static void 814 mitigation_taa_change_cpu(void *arg1, void *arg2) 815 { 816 struct cpu_info *ci = curcpu(); 817 bool enabled = arg1 != NULL; 818 819 mitigation_taa_apply_cpu(ci, enabled); 820 } 821 822 static void 823 taa_detect_method(void) 824 { 825 u_int descs[4]; 826 uint64_t msr; 827 828 taa_mitigation_enabled_ptr = &taa_mitigation_enabled; 829 830 if (cpu_vendor != CPUVENDOR_INTEL) { 831 taa_mitigation_method = TAA_MITIGATION_TAA_NO; 832 return; 833 } 834 if (!(cpu_feature[5] & CPUID_SEF_RTM)) { 835 taa_mitigation_method = TAA_MITIGATION_TAA_NO; 836 return; 837 } 838 839 /* 840 * If the CPU doesn't have MDS_NO set, then the TAA mitigation is based 841 * on the MDS mitigation. 842 */ 843 if (cpuid_level < 7) { 844 taa_mitigation_method = TAA_MITIGATION_MDS; 845 taa_mitigation_enabled_ptr = &mds_mitigation_enabled; 846 return; 847 } 848 x86_cpuid(0x7, descs); 849 if (!(descs[3] & CPUID_SEF_ARCH_CAP)) { 850 taa_mitigation_method = TAA_MITIGATION_MDS; 851 taa_mitigation_enabled_ptr = &mds_mitigation_enabled; 852 return; 853 } 854 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 855 if (!(msr & IA32_ARCH_MDS_NO)) { 856 taa_mitigation_method = TAA_MITIGATION_MDS; 857 taa_mitigation_enabled_ptr = &mds_mitigation_enabled; 858 return; 859 } 860 861 /* 862 * Otherwise, we need the TAA-specific mitigation. 863 */ 864 if (msr & IA32_ARCH_TAA_NO) { 865 taa_mitigation_method = TAA_MITIGATION_TAA_NO; 866 return; 867 } 868 if (msr & IA32_ARCH_TSX_CTRL) { 869 taa_mitigation_method = TAA_MITIGATION_RTM_DISABLE; 870 return; 871 } 872 } 873 874 static void 875 taa_set_name(void) 876 { 877 char name[64] = ""; 878 879 switch (taa_mitigation_method) { 880 case TAA_MITIGATION_NONE: 881 strlcpy(name, "(none)", sizeof(name)); 882 break; 883 case TAA_MITIGATION_TAA_NO: 884 strlcpy(name, "[TAA_NO]", sizeof(name)); 885 break; 886 case TAA_MITIGATION_MDS: 887 strlcpy(name, "[MDS]", sizeof(name)); 888 break; 889 case TAA_MITIGATION_RTM_DISABLE: 890 if (!taa_mitigation_enabled) { 891 strlcpy(name, "(none)", sizeof(name)); 892 } else { 893 strlcpy(name, "[RTM_DISABLE]", sizeof(name)); 894 } 895 break; 896 } 897 898 strlcpy(taa_mitigation_name, name, sizeof(taa_mitigation_name)); 899 } 900 901 static int 902 mitigation_taa_change(bool enabled) 903 { 904 uint64_t xc; 905 906 taa_detect_method(); 907 908 switch (taa_mitigation_method) { 909 case TAA_MITIGATION_NONE: 910 printf("[!] No mitigation available\n"); 911 return EOPNOTSUPP; 912 case TAA_MITIGATION_TAA_NO: 913 printf("[+] The CPU is not affected by TAA\n"); 914 return 0; 915 case TAA_MITIGATION_MDS: 916 printf("[!] Mitigation based on MDS, use machdep.mds\n"); 917 taa_set_name(); 918 return EINVAL; 919 case TAA_MITIGATION_RTM_DISABLE: 920 printf("[+] %s TAA Mitigation...", 921 enabled ? "Enabling" : "Disabling"); 922 xc = xc_broadcast(XC_HIGHPRI, mitigation_taa_change_cpu, 923 (void *)enabled, NULL); 924 xc_wait(xc); 925 printf(" done!\n"); 926 taa_mitigation_enabled = enabled; 927 taa_set_name(); 928 return 0; 929 default: 930 panic("impossible"); 931 } 932 } 933 934 static int 935 sysctl_machdep_taa_mitigated(SYSCTLFN_ARGS) 936 { 937 struct sysctlnode node; 938 int error; 939 bool val; 940 941 val = *(bool *)rnode->sysctl_data; 942 943 node = *rnode; 944 node.sysctl_data = &val; 945 946 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 947 if (error != 0 || newp == NULL) 948 return error; 949 950 if (val == *taa_mitigation_enabled_ptr) 951 return 0; 952 return mitigation_taa_change(val); 953 } 954 955 /* -------------------------------------------------------------------------- */ 956 957 void speculation_barrier(struct lwp *, struct lwp *); 958 959 void 960 speculation_barrier(struct lwp *oldlwp, struct lwp *newlwp) 961 { 962 /* 963 * Speculation barriers are applicable only to Spectre V2. 964 */ 965 if (!v2_mitigation_enabled) 966 return; 967 968 /* 969 * From kernel thread to kernel thread, no need for a barrier. 970 */ 971 if ((oldlwp->l_flag & LW_SYSTEM) && (newlwp->l_flag & LW_SYSTEM)) 972 return; 973 974 switch (v2_mitigation_method) { 975 case V2_MITIGATION_INTEL_IBRS: 976 wrmsr(MSR_IA32_PRED_CMD, IA32_PRED_CMD_IBPB); 977 break; 978 default: 979 /* nothing */ 980 break; 981 } 982 } 983 984 /* 985 * cpu0 is the one that detects the method and sets the global 'enabled' 986 * variable for each mitigation. 987 */ 988 void 989 cpu_speculation_init(struct cpu_info *ci) 990 { 991 /* 992 * Spectre V2. 993 */ 994 if (ci == &cpu_info_primary) { 995 v2_detect_method(); 996 v2_mitigation_enabled = 997 (v2_mitigation_method != V2_MITIGATION_NONE); 998 v2_set_name(); 999 } 1000 if (v2_mitigation_method != V2_MITIGATION_NONE) { 1001 mitigation_v2_apply_cpu(ci, true); 1002 } 1003 1004 /* 1005 * Spectre V4. 1006 * 1007 * Disabled by default, as recommended by AMD, but can be enabled 1008 * dynamically. We only detect if the CPU is not vulnerable, to 1009 * mark it as 'mitigated' in the sysctl. 1010 */ 1011 #if 0 1012 if (ci == &cpu_info_primary) { 1013 v4_detect_method(); 1014 v4_mitigation_enabled = 1015 (v4_mitigation_method != V4_MITIGATION_NONE); 1016 v4_set_name(); 1017 } 1018 if (v4_mitigation_method != V4_MITIGATION_NONE && 1019 v4_mitigation_method != V4_MITIGATION_INTEL_SSB_NO && 1020 v4_mitigation_method != V4_MITIGATION_AMD_SSB_NO) { 1021 mitigation_v4_apply_cpu(ci, true); 1022 } 1023 #else 1024 if (ci == &cpu_info_primary) { 1025 v4_detect_method(); 1026 if (v4_mitigation_method == V4_MITIGATION_INTEL_SSB_NO || 1027 v4_mitigation_method == V4_MITIGATION_AMD_SSB_NO) { 1028 v4_mitigation_enabled = true; 1029 v4_set_name(); 1030 } 1031 } 1032 #endif 1033 1034 /* 1035 * Microarchitectural Data Sampling. 1036 */ 1037 if (ci == &cpu_info_primary) { 1038 mds_detect_method(); 1039 mds_mitigation_enabled = 1040 (mds_mitigation_method != MDS_MITIGATION_NONE); 1041 mds_set_name(); 1042 } 1043 if (mds_mitigation_method != MDS_MITIGATION_NONE && 1044 mds_mitigation_method != MDS_MITIGATION_MDS_NO) { 1045 mitigation_mds_apply_cpu(ci, true); 1046 } 1047 1048 /* 1049 * TSX Asynchronous Abort. 1050 */ 1051 if (ci == &cpu_info_primary) { 1052 taa_detect_method(); 1053 taa_mitigation_enabled = 1054 (taa_mitigation_method == TAA_MITIGATION_RTM_DISABLE) || 1055 (taa_mitigation_method == TAA_MITIGATION_TAA_NO); 1056 taa_set_name(); 1057 } 1058 if (taa_mitigation_method == TAA_MITIGATION_RTM_DISABLE) { 1059 mitigation_taa_apply_cpu(ci, true); 1060 } 1061 } 1062 1063 void sysctl_speculation_init(struct sysctllog **); 1064 1065 void 1066 sysctl_speculation_init(struct sysctllog **clog) 1067 { 1068 const struct sysctlnode *spec_rnode; 1069 1070 /* SpectreV1 */ 1071 spec_rnode = NULL; 1072 sysctl_createv(clog, 0, NULL, &spec_rnode, 1073 CTLFLAG_PERMANENT, 1074 CTLTYPE_NODE, "spectre_v1", NULL, 1075 NULL, 0, NULL, 0, 1076 CTL_MACHDEP, CTL_CREATE); 1077 sysctl_createv(clog, 0, &spec_rnode, &spec_rnode, 1078 CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE, 1079 CTLTYPE_BOOL, "mitigated", 1080 SYSCTL_DESCR("Whether Spectre Variant 1 is mitigated"), 1081 NULL, 0 /* mitigated=0 */, NULL, 0, 1082 CTL_CREATE, CTL_EOL); 1083 1084 /* SpectreV2 */ 1085 spec_rnode = NULL; 1086 sysctl_createv(clog, 0, NULL, &spec_rnode, 1087 CTLFLAG_PERMANENT, 1088 CTLTYPE_NODE, "spectre_v2", NULL, 1089 NULL, 0, NULL, 0, 1090 CTL_MACHDEP, CTL_CREATE); 1091 sysctl_createv(clog, 0, &spec_rnode, NULL, 1092 CTLFLAG_READWRITE, 1093 CTLTYPE_BOOL, "hwmitigated", 1094 SYSCTL_DESCR("Whether Spectre Variant 2 is HW-mitigated"), 1095 sysctl_machdep_spectreV2_mitigated, 0, 1096 &v2_mitigation_enabled, 0, 1097 CTL_CREATE, CTL_EOL); 1098 sysctl_createv(clog, 0, &spec_rnode, NULL, 1099 CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE, 1100 CTLTYPE_BOOL, "swmitigated", 1101 SYSCTL_DESCR("Whether Spectre Variant 2 is SW-mitigated"), 1102 #if defined(SPECTRE_V2_GCC_MITIGATION) 1103 NULL, 1, 1104 #else 1105 NULL, 0, 1106 #endif 1107 NULL, 0, 1108 CTL_CREATE, CTL_EOL); 1109 sysctl_createv(clog, 0, &spec_rnode, NULL, 1110 CTLFLAG_PERMANENT, 1111 CTLTYPE_STRING, "method", 1112 SYSCTL_DESCR("Mitigation method in use"), 1113 NULL, 0, 1114 v2_mitigation_name, 0, 1115 CTL_CREATE, CTL_EOL); 1116 1117 /* SpectreV4 */ 1118 spec_rnode = NULL; 1119 sysctl_createv(clog, 0, NULL, &spec_rnode, 1120 CTLFLAG_PERMANENT, 1121 CTLTYPE_NODE, "spectre_v4", NULL, 1122 NULL, 0, NULL, 0, 1123 CTL_MACHDEP, CTL_CREATE); 1124 sysctl_createv(clog, 0, &spec_rnode, NULL, 1125 CTLFLAG_READWRITE, 1126 CTLTYPE_BOOL, "mitigated", 1127 SYSCTL_DESCR("Whether Spectre Variant 4 is mitigated"), 1128 sysctl_machdep_spectreV4_mitigated, 0, 1129 &v4_mitigation_enabled, 0, 1130 CTL_CREATE, CTL_EOL); 1131 sysctl_createv(clog, 0, &spec_rnode, NULL, 1132 CTLFLAG_PERMANENT, 1133 CTLTYPE_STRING, "method", 1134 SYSCTL_DESCR("Mitigation method in use"), 1135 NULL, 0, 1136 v4_mitigation_name, 0, 1137 CTL_CREATE, CTL_EOL); 1138 1139 /* Microarchitectural Data Sampling */ 1140 spec_rnode = NULL; 1141 sysctl_createv(clog, 0, NULL, &spec_rnode, 1142 CTLFLAG_PERMANENT, 1143 CTLTYPE_NODE, "mds", NULL, 1144 NULL, 0, NULL, 0, 1145 CTL_MACHDEP, CTL_CREATE); 1146 sysctl_createv(clog, 0, &spec_rnode, NULL, 1147 CTLFLAG_READWRITE, 1148 CTLTYPE_BOOL, "mitigated", 1149 SYSCTL_DESCR("Whether MDS is mitigated"), 1150 sysctl_machdep_mds_mitigated, 0, 1151 &mds_mitigation_enabled, 0, 1152 CTL_CREATE, CTL_EOL); 1153 sysctl_createv(clog, 0, &spec_rnode, NULL, 1154 CTLFLAG_PERMANENT, 1155 CTLTYPE_STRING, "method", 1156 SYSCTL_DESCR("Mitigation method in use"), 1157 NULL, 0, 1158 mds_mitigation_name, 0, 1159 CTL_CREATE, CTL_EOL); 1160 1161 /* TSX Asynchronous Abort */ 1162 spec_rnode = NULL; 1163 sysctl_createv(clog, 0, NULL, &spec_rnode, 1164 CTLFLAG_PERMANENT, 1165 CTLTYPE_NODE, "taa", NULL, 1166 NULL, 0, NULL, 0, 1167 CTL_MACHDEP, CTL_CREATE); 1168 sysctl_createv(clog, 0, &spec_rnode, NULL, 1169 CTLFLAG_READWRITE, 1170 CTLTYPE_BOOL, "mitigated", 1171 SYSCTL_DESCR("Whether TAA is mitigated"), 1172 sysctl_machdep_taa_mitigated, 0, 1173 taa_mitigation_enabled_ptr, 0, 1174 CTL_CREATE, CTL_EOL); 1175 sysctl_createv(clog, 0, &spec_rnode, NULL, 1176 CTLFLAG_PERMANENT, 1177 CTLTYPE_STRING, "method", 1178 SYSCTL_DESCR("Mitigation method in use"), 1179 NULL, 0, 1180 taa_mitigation_name, 0, 1181 CTL_CREATE, CTL_EOL); 1182 } 1183