git.itanic.dy.fi Git - linux-stable/commitdiff
ARM: 9282/1: vfp: Manipulate task VFP state with softirqs disabled
author: Ard Biesheuvel <ardb@kernel.org>
Thu, 22 Dec 2022 17:49:51 +0000 (18:49 +0100)
committer: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Wed, 11 Jan 2023 16:21:20 +0000 (16:21 +0000)
In a subsequent patch, we will relax the kernel mode NEON policy, and
permit kernel mode NEON to be used not only from task context, as is
permitted today, but also from softirq context.

Given that softirqs may trigger over the back of any IRQ unless they are
explicitly disabled, we need to address the resulting races in the VFP
state handling, by disabling softirq processing in two distinct but
related cases:
- kernel mode NEON will leave the FPU disabled after it completes, so
  any kernel code sequence that enables the FPU and subsequently accesses
  its registers needs to disable softirqs until it completes;
- kernel_neon_begin() will preserve the userland VFP state in memory,
  and if it interrupts the ordinary VFP state preserve sequence, the
  latter will resume execution with the VFP registers corrupted, and
  happily continue saving them to memory.

Given that disabling softirqs also disables preemption, we can replace
the existing preempt_disable/enable occurrences in the VFP state
handling asm code with new macros that dis/enable softirqs instead.
In the VFP state handling C code, add local_bh_disable/enable() calls
in those places where the VFP state is preserved.

One thing to keep in mind is that, once we allow NEON use in softirq
context, the result of any such interruption is that the FPEXC_EN bit in
the FPEXC register will be cleared, and vfp_current_hw_state[cpu] will
be NULL. This means that any sequence that [conditionally] clears
FPEXC_EN and/or sets vfp_current_hw_state[cpu] to NULL does not need to
run with softirqs disabled, as the result will be the same. Furthermore,
the handling of THREAD_NOTIFY_SWITCH is guaranteed to run with IRQs
disabled, and so it does not need protection from softirq interruptions
either.

Tested-by: Martin Willi <martin@strongswan.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
arch/arm/include/asm/assembler.h
arch/arm/kernel/asm-offsets.c
arch/arm/vfp/entry.S
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c

index 28e18f79c300fd2ebc4d8b61770fdef8203c7dbc..06b48ce23e1ca245fb87d68d6a26213d6bff6dd5 100644 (file)
@@ -236,21 +236,26 @@ THUMB(    fpreg   .req    r7      )
        sub     \tmp, \tmp, #1                  @ decrement it
        str     \tmp, [\ti, #TI_PREEMPT]
        .endm
-
-       .macro  dec_preempt_count_ti, ti, tmp
-       get_thread_info \ti
-       dec_preempt_count \ti, \tmp
-       .endm
 #else
        .macro  inc_preempt_count, ti, tmp
        .endm
 
        .macro  dec_preempt_count, ti, tmp
        .endm
+#endif
+
+       .macro  local_bh_disable, ti, tmp
+       ldr     \tmp, [\ti, #TI_PREEMPT]
+       add     \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
+       str     \tmp, [\ti, #TI_PREEMPT]
+       .endm
 
-       .macro  dec_preempt_count_ti, ti, tmp
+       .macro  local_bh_enable_ti, ti, tmp
+       get_thread_info \ti
+       ldr     \tmp, [\ti, #TI_PREEMPT]
+       sub     \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
+       str     \tmp, [\ti, #TI_PREEMPT]
        .endm
-#endif
 
 #define USERL(l, x...)                         \
 9999:  x;                                      \
index 2c8d76fd7c66298ad2fd8c10d471953fc930ad1c..38121c59cbc26cdd51afc30e3593b909e3890c18 100644 (file)
@@ -56,6 +56,7 @@ int main(void)
   DEFINE(VFP_CPU,              offsetof(union vfp_state, hard.cpu));
 #endif
 #endif
+  DEFINE(SOFTIRQ_DISABLE_OFFSET,SOFTIRQ_DISABLE_OFFSET);
 #ifdef CONFIG_ARM_THUMBEE
   DEFINE(TI_THUMBEE_STATE,     offsetof(struct thread_info, thumbee_state));
 #endif
index 27b0a1f27fbdf392e882d049045fd8102fe31b6e..9a89264cdcc0b46ec8fc9895219f09dc52830378 100644 (file)
@@ -22,7 +22,7 @@
 @  IRQs enabled.
 @
 ENTRY(do_vfp)
-       inc_preempt_count r10, r4
+       local_bh_disable r10, r4
        ldr     r4, .LCvfp
        ldr     r11, [r10, #TI_CPU]     @ CPU number
        add     r10, r10, #TI_VFPSTATE  @ r10 = workspace
@@ -30,7 +30,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-       dec_preempt_count_ti r10, r4
+       local_bh_enable_ti r10, r4
        ret     lr
 ENDPROC(vfp_null_entry)
 
index 6f7926c9c1790f6675bac00adc1a58f10b67002e..26c4f61ecfa39638ac9c1442b68409601ef515ad 100644 (file)
@@ -175,7 +175,7 @@ vfp_hw_state_valid:
                                        @ else it's one 32-bit instruction, so
                                        @ always subtract 4 from the following
                                        @ instruction address.
-       dec_preempt_count_ti r10, r4
+       local_bh_enable_ti r10, r4
        ret     r9                      @ we think we have handled things
 
 
@@ -200,7 +200,7 @@ skip:
        @ not recognised by VFP
 
        DBGSTR  "not VFP"
-       dec_preempt_count_ti r10, r4
+       local_bh_enable_ti r10, r4
        ret     lr
 
 process_exception:
index 281110423871caef60ccb9877a9447e7c9877880..86fb0be41ae14b6c1225f94acc62eb7ab85a0233 100644 (file)
@@ -416,7 +416,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
  exit:
-       preempt_enable();
+       local_bh_enable();
 }
 
 static void vfp_enable(void *unused)
@@ -517,6 +517,8 @@ void vfp_sync_hwstate(struct thread_info *thread)
 {
        unsigned int cpu = get_cpu();
 
+       local_bh_disable();
+
        if (vfp_state_in_hw(cpu, thread)) {
                u32 fpexc = fmrx(FPEXC);
 
@@ -528,6 +530,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
                fmxr(FPEXC, fpexc);
        }
 
+       local_bh_enable();
        put_cpu();
 }
 
@@ -717,6 +720,8 @@ void kernel_neon_begin(void)
        unsigned int cpu;
        u32 fpexc;
 
+       local_bh_disable();
+
        /*
         * Kernel mode NEON is only allowed outside of interrupt context
         * with preemption disabled. This will make sure that the kernel
@@ -739,6 +744,7 @@ void kernel_neon_begin(void)
                vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 #endif
        vfp_current_hw_state[cpu] = NULL;
+       local_bh_enable();
 }
 EXPORT_SYMBOL(kernel_neon_begin);