x86/retpoline/crypto: Convert crypto assembler indirect jumps
author David Woodhouse <dwmw@amazon.co.uk>
Thu, 11 Jan 2018 21:46:27 +0000 (21:46 +0000)
committer Sasha Levin <alexander.levin@microsoft.com>
Wed, 21 Mar 2018 03:49:43 +0000 (23:49 -0400)
[ Upstream commit 9697fa39efd3fc3692f2949d4045f393ec58450b ]

Convert all indirect jumps in crypto assembler code to use non-speculative
sequences when CONFIG_RETPOLINE is enabled.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-6-git-send-email-dwmw@amazon.co.uk
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
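
For readers unfamiliar with the two macros: CALL_NOSPEC and JMP_NOSPEC come
from arch/x86/include/asm/nospec-branch.h, added earlier in this retpoline
series. Conceptually they behave like the simplified sketch below; the real
definitions additionally use alternatives so that CPUs which do not need
retpolines are patched back to the plain indirect branch at boot. This is an
illustrative sketch, not the literal header contents.

.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	RETPOLINE_JMP	\reg		/* retpoline sequence (see the note after the diffs) */
#else
	jmp	*\reg			/* plain indirect jump when retpoline is disabled */
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	RETPOLINE_CALL	\reg		/* retpoline-based indirect call */
#else
	call	*\reg			/* plain indirect call when retpoline is disabled */
#endif
.endm
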
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/camellia-aesni-avx-asm_64.S
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
arch/x86/crypto/crc32c-pcl-intel-asm_64.S

arch/x86/crypto/aesni-intel_asm.S
index 6bd2c6c95373f58dc8f915f5e50967105bf404d0..3f93dedb5a4dc33ab73e45abc69e0c9e33e4614e 100644 (file)
@@ -31,6 +31,7 @@
 
 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2714,7 +2715,7 @@ ENTRY(aesni_xts_crypt8)
        pxor INC, STATE4
        movdqu IV, 0x30(OUTP)
 
-       call *%r11
+       CALL_NOSPEC %r11
 
        movdqu 0x00(OUTP), INC
        pxor INC, STATE1
@@ -2759,7 +2760,7 @@ ENTRY(aesni_xts_crypt8)
        _aesni_gf128mul_x_ble()
        movups IV, (IVP)
 
-       call *%r11
+       CALL_NOSPEC %r11
 
        movdqu 0x40(OUTP), INC
        pxor INC, STATE1
arch/x86/crypto/camellia-aesni-avx-asm_64.S
index ce71f9212409f16326ffc92efec812c2be7d731c..5881756f78a21c1c2db89625dd6f56120eecd7a3 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1210,7 +1211,7 @@ camellia_xts_crypt_16way:
        vpxor 14 * 16(%rax), %xmm15, %xmm14;
        vpxor 15 * 16(%rax), %xmm15, %xmm15;
 
-       call *%r9;
+       CALL_NOSPEC %r9;
 
        addq $(16 * 16), %rsp;
 
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 0e0b8863a34bd168c618941407b2890c385a442d..0d45b04b490a86ee0aab0fdd83efe9d1a53d0efc 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1323,7 +1324,7 @@ camellia_xts_crypt_32way:
        vpxor 14 * 32(%rax), %ymm15, %ymm14;
        vpxor 15 * 32(%rax), %ymm15, %ymm15;
 
-       call *%r9;
+       CALL_NOSPEC %r9;
 
        addq $(16 * 32), %rsp;
 
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 225be06edc80982f9509e25af09e1ac0a0232237..5fc80c880a164cfb779d8536cfde312f937c65e0 100644 (file)
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -172,7 +173,7 @@ continue_block:
        movzxw  (bufp, %rax, 2), len
        offset=crc_array-jump_table
        lea     offset(bufp, len, 1), bufp
-       jmp     *bufp
+       JMP_NOSPEC bufp
 
        ################################################################
        ## 2a) PROCESS FULL BLOCKS:
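
The crc32c hunk is the one indirect jump in the set: bufp (a register alias
defined in that file) is loaded with a code address computed from a jump
table and then dispatched through. With CONFIG_RETPOLINE enabled,
JMP_NOSPEC bufp replaces the "jmp *bufp" with a retpoline roughly like the
sketch below (labels are illustrative, not the exact macro expansion):

	call	.Ldo_rop		/* pushes .Lspec_trap as the predicted return address */
.Lspec_trap:
	pause				/* speculation following the return-stack prediction */
	lfence				/* spins harmlessly in this loop */
	jmp	.Lspec_trap
.Ldo_rop:
	mov	bufp, (%rsp)		/* overwrite the return address with the real target */
	ret				/* architecturally transfers to *bufp */

The CPU predicts the final ret through the return stack buffer, which points
at the pause/lfence trap, so no attacker-trainable indirect-branch prediction
is consumed, while the architectural path still reaches the intended
jump-table entry. CALL_NOSPEC wraps the same sequence in an extra direct call
so the caller gets a normal return address.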