x86/nospec: Fix i386 RSB stuffing
author      Peter Zijlstra <peterz@infradead.org>
            Fri, 19 Aug 2022 11:01:35 +0000 (13:01 +0200)
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>
            Thu, 15 Sep 2022 10:17:06 +0000 (12:17 +0200)
commit 332924973725e8cdcc783c175f68cf7e162cb9e5 upstream.

It turns out that i386 doesn't unconditionally have LFENCE, so the
loop in __FILL_RETURN_BUFFER isn't actually speculation-safe on such
chips.
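
The fix below therefore replaces the loop with a fully unrolled call
sequence on 32-bit: with no backward jnz left to mispredict, no trailing
LFENCE is needed. As a rough userspace illustration of the same pattern
(the function name is hypothetical and the stuffing depth of 16 is
assumed for brevity; the kernel's RSB_CLEAR_LOOPS is 32; build with
gcc -m32):

    #include <stdio.h>

    /*
     * Each call pushes a 4-byte return address (i386) and creates an
     * RSB prediction pointing at the int3 speculation trap; the call
     * target 772 is the instruction after the trap, so the int3 never
     * executes architecturally. The final add discards the 16 stacked
     * return addresses, leaving the stack balanced.
     */
    static void rsb_stuff_unrolled(void)
    {
            asm volatile(
                    ".rept 16\n\t"
                    "call 772f\n\t"
                    "int3\n\t"
                    "772:\n\t"
                    ".endr\n\t"
                    "add $(4 * 16), %%esp"
                    ::: "memory");
    }

    int main(void)
    {
            rsb_stuff_unrolled();
            puts("returned with the stack balanced");
            return 0;
    }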

Fixes: ba6e31af2be9 ("x86/speculation: Add LFENCE to RSB fill sequence")
Reported-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/Yv9tj9vbQ9nNlXoY@worktop.programming.kicks-ass.net
[bwh: Backported to 4.19/5.4:
 - __FILL_RETURN_BUFFER takes an sp parameter
 - Open-code __FILL_RETURN_SLOT]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
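
For reference, the upstream __FILL_RETURN_SLOT helper that the backport
note above says is open-coded looks roughly like this (quoted from
memory of upstream nospec-branch.h, so treat it as a sketch):

    #define __FILL_RETURN_SLOT                      \
            ANNOTATE_INTRA_FUNCTION_CALL;           \
            call    772f;                           \
            int3;                                   \
    772:

The backport drops the ANNOTATE_INTRA_FUNCTION_CALL objtool annotation,
which doesn't exist in 4.19/5.4, and inlines the call/int3/label
sequence directly into the i386 __FILL_RETURN_BUFFER in the diff below.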
arch/x86/include/asm/nospec-branch.h

index 747549934fe32fe4cbfd8c76502869983d031303..17a236a8b2370b449ec030d6d021f994b9d70567 100644
@@ -35,6 +35,7 @@
  * the optimal version — two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
+#ifdef CONFIG_X86_64
 #define __FILL_RETURN_BUFFER(reg, nr, sp)      \
        mov     $(nr/2), reg;                   \
 771:                                           \
        add     $(BITS_PER_LONG/8) * nr, sp;    \
        /* barrier for jnz misprediction */     \
        lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, as such it can't
+ * do a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)      \
+       .rept nr;                               \
+       call    772f;                           \
+       int3;                                   \
+772:;                                          \
+       .endr;                                  \
+       add     $(BITS_PER_LONG/8) * nr, sp;
+#endif
 
 /* Sequence to mitigate PBRSB on eIBRS CPUs */
 #define __ISSUE_UNBALANCED_RET_GUARD(sp)       \
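
For context on how the rewritten macro gets used: callers don't invoke
__FILL_RETURN_BUFFER directly; the FILL_RETURN_BUFFER assembler macro
wraps it in an alternative so the whole sequence is patched out on CPUs
whose feature bit says it isn't needed. A sketch of the 5.4-era wrapper
from memory (exact annotations and spelling may differ; check the
tree's nospec-branch.h):

    .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
    #ifdef CONFIG_RETPOLINE
            ANNOTATE_NOSPEC_ALTERNATIVE
            ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
                    __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
                    \ftr
    .Lskip_rsb_\@:
    #endif
    .endm

On i386, %_ASM_SP expands to %esp and BITS_PER_LONG/8 to 4, so the
trailing add pops 4 * nr bytes: exactly the nr return addresses that
the unrolled calls pushed.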