git.itanic.dy.fi Git - linux-stable/blobdiff - arch/x86/kernel/cpu/common.c
x86/bugs: Rename various 'ia32_cap' variables to 'x86_arch_cap_msr'
[linux-stable] / arch / x86 / kernel / cpu / common.c
index 785fedddb5f09c499d74da30c70b246269f1b626..46603c6e400aa326c0aeabb7aee7d4065868cd9d 100644 (file)
@@ -1327,25 +1327,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
 
 u64 x86_read_arch_cap_msr(void)
 {
-       u64 ia32_cap = 0;
+       u64 x86_arch_cap_msr = 0;
 
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
-       return ia32_cap;
+       return x86_arch_cap_msr;
 }
 
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-       return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-               ia32_cap & ARCH_CAP_PSDP_NO &&
-               ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+       return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+               x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+               x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
        /* The "immunity" bit trumps everything else: */
-       if (ia32_cap & ARCH_CAP_RFDS_NO)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
                return false;
 
        /*
@@ -1353,7 +1353,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
         * indicate that mitigation is needed because guest is running on a
         * vulnerable hardware or may migrate to such hardware:
         */
-       if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
                return true;
 
        /* Only consult the blacklist when there is no enumeration: */
@@ -1362,11 +1362,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-       u64 ia32_cap = x86_read_arch_cap_msr();
+       u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
        /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
        if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-           !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+           !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
                setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
        if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1378,7 +1378,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
        if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-           !(ia32_cap & ARCH_CAP_SSB_NO) &&
+           !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
@@ -1386,15 +1386,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
         * flag and protect from vendor-specific bugs via the whitelist.
         */
-       if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+       if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
                if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-                   !(ia32_cap & ARCH_CAP_PBRSB_NO))
+                   !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
                        setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
        }
 
        if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-           !(ia32_cap & ARCH_CAP_MDS_NO)) {
+           !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
                setup_force_cpu_bug(X86_BUG_MDS);
                if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1413,9 +1413,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * TSX_CTRL check alone is not sufficient for cases when the microcode
         * update is not present or running as guest that don't get TSX_CTRL.
         */
-       if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+       if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
            (cpu_has(c, X86_FEATURE_RTM) ||
-            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+            (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
                setup_force_cpu_bug(X86_BUG_TAA);
 
        /*
@@ -1441,7 +1441,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
         * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
         */
-       if (!arch_cap_mmio_immune(ia32_cap)) {
+       if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
                if (cpu_matches(cpu_vuln_blacklist, MMIO))
                        setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
                else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@@ -1449,7 +1449,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        }
 
        if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-               if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+               if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
                        setup_force_cpu_bug(X86_BUG_RETBLEED);
        }
 
@@ -1467,15 +1467,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
         * which means that AVX will be disabled.
         */
-       if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+       if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
            boot_cpu_has(X86_FEATURE_AVX))
                setup_force_cpu_bug(X86_BUG_GDS);
 
-       if (vulnerable_to_rfds(ia32_cap))
+       if (vulnerable_to_rfds(x86_arch_cap_msr))
                setup_force_cpu_bug(X86_BUG_RFDS);
 
        /* When virtualized, eIBRS could be hidden, assume vulnerable */
-       if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+       if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
            !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
            (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
             boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1485,7 +1485,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                return;
 
        /* Rogue Data Cache Load? No! */
-       if (ia32_cap & ARCH_CAP_RDCL_NO)
+       if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
                return;
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);