x86/bugs: Rename various 'ia32_cap' variables to 'x86_arch_cap_msr'
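For context before the hunks: every variable renamed below caches the value of the IA32_ARCH_CAPABILITIES MSR as returned by x86_read_arch_cap_msr(). At the time of this commit that helper (in arch/x86/kernel/cpu/common.c) looks roughly like the sketch below; this is orientation material, not part of the diff:

u64 x86_read_arch_cap_msr(void)
{
        u64 x86_arch_cap_msr = 0;

        /* MSR 0x10a exists only if CPUID enumerates ARCH_CAPABILITIES. */
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);

        return x86_arch_cap_msr;
}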
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 17cc91f414759c7c31bc0e7d744c7917218554a8..82d13b02098d71531b45ad4cf58c9bd9a82d48f2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
-static u64 __ro_after_init ia32_cap;
+static u64 __ro_after_init x86_arch_cap_msr;
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
@@ -146,7 +146,7 @@ void __init cpu_select_mitigations(void)
                x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
        }
 
-       ia32_cap = x86_read_arch_cap_msr();
+       x86_arch_cap_msr = x86_read_arch_cap_msr();
 
        /* Select the proper CPU mitigations before patching alternatives: */
        spectre_v1_select_mitigation();
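
The hunk above is the point of the file-scope cache: the MSR is read exactly once, before any of the *_select_mitigation() routines run, and every later test works on the cached copy instead of issuing RDMSR again. A minimal sketch of that consumption pattern (cpu_has_arch_cap() is a hypothetical helper, not in the kernel):

static u64 __ro_after_init x86_arch_cap_msr;    /* written once during boot */

/* Hypothetical helper showing how the cached copy is consumed. */
static bool __init cpu_has_arch_cap(u64 bit)
{
        return !!(x86_arch_cap_msr & bit);
}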
@@ -343,8 +343,8 @@ static void __init taa_select_mitigation(void)
         * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
         * update is required.
         */
-       if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
-           !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+       if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+           !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 
        /*
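
What the TAA hunk encodes: MDS_NO=1 means the part is not affected by MDS, so TAA is reachable only through TSX, and on such parts the presence of the TSX_CTRL MSR (enumerated by ARCH_CAP_TSX_CTRL_MSR) is the tell that TAA-aware microcode is installed. A standalone restatement, with the bit definitions mirrored from arch/x86/include/asm/msr-index.h (the predicate name is made up for illustration):

#include <linux/bits.h>

#define ARCH_CAP_MDS_NO         BIT(5)  /* CPU is not affected by MDS */
#define ARCH_CAP_TSX_CTRL_MSR   BIT(7)  /* MSR_IA32_TSX_CTRL is present */

/* Hypothetical predicate mirroring the check in taa_select_mitigation(). */
static bool taa_ucode_needed(u64 arch_cap)
{
        return (arch_cap & ARCH_CAP_MDS_NO) &&
              !(arch_cap & ARCH_CAP_TSX_CTRL_MSR);
}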
@@ -434,7 +434,7 @@ static void __init mmio_select_mitigation(void)
         * be propagated to uncore buffers, clearing the Fill buffers on idle
         * is required irrespective of SMT state.
         */
-       if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+       if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
                static_branch_enable(&mds_idle_clear);
 
        /*
@@ -444,10 +444,10 @@ static void __init mmio_select_mitigation(void)
         * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
         * affected systems.
         */
-       if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+       if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
            (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
             boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
-            !(ia32_cap & ARCH_CAP_MDS_NO)))
+            !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
                mmio_mitigation = MMIO_MITIGATION_VERW;
        else
                mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
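
Two decisions in mmio_select_mitigation() above: idle-time buffer clearing stays on unless FBSDP_NO guarantees fill buffers cannot leak stale data, and the VERW mitigation is considered effective either when FB_CLEAR is enumerated or when the CPU is still MDS-affected (no MDS_NO) and carries the MD_CLEAR plus L1D_FLUSH microcode, whose VERW already scrubs the buffers. Condensed into one hypothetical predicate (kernel-style BIT() definitions as in the TAA sketch):

/* Hypothetical: is the VERW-based MMIO mitigation usable? */
static bool mmio_verw_usable(u64 arch_cap, bool md_clear, bool l1d_flush)
{
        if (arch_cap & ARCH_CAP_FB_CLEAR)       /* BIT(17) in msr-index.h */
                return true;
        /* MDS-affected parts can reuse the MDS (VERW) microcode. */
        return md_clear && l1d_flush && !(arch_cap & ARCH_CAP_MDS_NO);
}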
@@ -505,7 +505,7 @@ static void __init rfds_select_mitigation(void)
        if (rfds_mitigation == RFDS_MITIGATION_OFF)
                return;
 
-       if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
                setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
        else
                rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
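
The RFDS hunk has the same shape: if the microcode advertises RFDS_CLEAR, the existing VERW sequence also clears the register file buffers, so the generic X86_FEATURE_CLEAR_CPU_BUF flag can simply be forced; otherwise a microcode update is flagged. Bit positions, mirrored from msr-index.h and worth double-checking there:

#define ARCH_CAP_RFDS_NO        BIT(27) /* CPU is not affected by RFDS */
#define ARCH_CAP_RFDS_CLEAR     BIT(28) /* VERW clears register file buffers */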
@@ -664,7 +664,7 @@ static void __init srbds_select_mitigation(void)
         * are only exposed to SRBDS when TSX is enabled or when CPU is affected
         * by Processor MMIO Stale Data vulnerability.
         */
-       if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+       if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
            !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
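
The SRBDS reasoning above: on MDS_NO parts the stale sampled data is only exposed through TSX reads or through the MMIO Stale Data path, so with TSX unavailable (no X86_FEATURE_RTM) and no MMIO bug, disabling TSX is itself a complete mitigation and no SRBDS microcode control is needed. Restated as a hypothetical predicate:

/* Hypothetical: is TSX-off alone a sufficient SRBDS mitigation? */
static bool srbds_tsx_off_enough(u64 arch_cap, bool has_rtm, bool mmio_bug)
{
        return (arch_cap & ARCH_CAP_MDS_NO) && !has_rtm && !mmio_bug;
}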
@@ -807,7 +807,7 @@ static void __init gds_select_mitigation(void)
        /* Will verify below that mitigation _can_ be disabled */
 
        /* No microcode */
-       if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
+       if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
                if (gds_mitigation == GDS_MITIGATION_FORCE) {
                        /*
                         * This only needs to be done on the boot CPU so do it
@@ -1540,14 +1540,14 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
 /* Disable in-kernel use of non-RSB RET predictors */
 static void __init spec_ctrl_disable_kernel_rrsba(void)
 {
-       u64 ia32_cap;
+       u64 x86_arch_cap_msr;
 
        if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
                return;
 
-       ia32_cap = x86_read_arch_cap_msr();
+       x86_arch_cap_msr = x86_read_arch_cap_msr();
 
-       if (ia32_cap & ARCH_CAP_RRSBA) {
+       if (x86_arch_cap_msr & ARCH_CAP_RRSBA) {
                x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
                update_spec_ctrl(x86_spec_ctrl_base);
        }
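
Note that spec_ctrl_disable_kernel_rrsba() still takes a fresh local read of the MSR rather than using the file-scope cache; this commit only renames the shadowing local for consistency. Functionally, when the CPU enumerates RRSBA behavior (RETs may use non-RSB predictors after RSB underflow) and RRSBA_CTRL provides the off switch, the code sets RRSBA_DIS_S in the SPEC_CTRL base value. A minimal sketch of how that value reaches the hardware (SPEC_CTRL_RRSBA_DIS_S is BIT(6) in msr-index.h; the real update_spec_ctrl() also mirrors the value into a per-CPU variable):

#define SPEC_CTRL_RRSBA_DIS_S   BIT(6)  /* disable RRSBA behavior in supervisor mode */

/* Simplified stand-in for update_spec_ctrl(). */
static void update_spec_ctrl_sketch(u64 val)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, val);
}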
@@ -1915,7 +1915,7 @@ static void update_mds_branch_idle(void)
        if (sched_smt_active()) {
                static_branch_enable(&mds_idle_clear);
        } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
-                  (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+                  (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
                static_branch_disable(&mds_idle_clear);
        }
 }
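
mds_idle_clear is a static key consumed on the idle entry path: when enabled, the CPU runs VERW before going idle so that an HT sibling cannot sample its stale fill-buffer contents. The consumer looks roughly like the kernel's inline in arch/x86/include/asm/nospec-branch.h:

static __always_inline void mds_idle_clear_cpu_buffers(void)
{
        /* Patched to a no-op when the static key is disabled. */
        if (static_branch_likely(&mds_idle_clear))
                mds_clear_cpu_buffers();        /* issues VERW */
}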
@@ -2809,7 +2809,7 @@ static const char *spectre_bhi_state(void)
        else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
                return "; BHI: SW loop, KVM: SW loop";
        else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
-                !(ia32_cap & ARCH_CAP_RRSBA))
+                !(x86_arch_cap_msr & ARCH_CAP_RRSBA))
                return "; BHI: Retpoline";
        else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
                return "; BHI: Syscall hardening, KVM: SW loop";