s390: don't trace preemption in percpu macros
author     Sven Schnelle <svens@linux.ibm.com>
           Thu, 20 Aug 2020 07:48:23 +0000 (09:48 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 12 Sep 2020 09:45:27 +0000 (11:45 +0200)
[ Upstream commit 1196f12a2c960951d02262af25af0bb1775ebcc2 ]

Since commit a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context}
to per-cpu variables") the lockdep code itself uses percpu variables. This
leads to recursion because the percpu macros call preempt_enable(), which
might call trace_preempt_on().

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
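
To make the recursion path concrete, here is a minimal userspace model of the
problem and of why the _notrace variants break the cycle. All names below (the
*_model helpers, the depth guard) are simplified stand-ins for illustration
only, not the real kernel implementation; the real preempt_disable_notrace()/
preempt_enable_notrace() still adjust the preempt count, they merely skip the
tracing hooks.

/* recursion_model.c - build with: cc -o recursion_model recursion_model.c */
#include <stdio.h>

static int lockdep_percpu_state;	/* stands in for lockdep's per-cpu variables */
static int depth;			/* guard so this demo terminates; the kernel has no such guard */

static void this_cpu_add_model(int *pcp, int val, int notrace);

/* Stands in for trace_preempt_on(): since a21ee6055c30 the tracing/lockdep
 * path itself updates per-cpu state, i.e. it re-enters the percpu macros. */
static void trace_preempt_on_model(void)
{
	if (++depth > 3) {
		puts("  ... would recurse without bound");
		return;
	}
	this_cpu_add_model(&lockdep_percpu_state, 1, 0);
}

/* Stands in for preempt_enable() vs. preempt_enable_notrace():
 * both re-enable preemption, only the traced variant calls the tracer. */
static void preempt_enable_model(int notrace)
{
	if (!notrace)
		trace_preempt_on_model();
}

/* Stands in for an arch percpu op such as this_cpu_add_4(). */
static void this_cpu_add_model(int *pcp, int val, int notrace)
{
	/* preempt_disable{,_notrace}() would go here */
	*pcp += val;
	preempt_enable_model(notrace);
}

int main(void)
{
	puts("traced percpu op (old code):");
	this_cpu_add_model(&lockdep_percpu_state, 1, 0);

	depth = 0;
	puts("notrace percpu op (patched code):");
	this_cpu_add_model(&lockdep_percpu_state, 1, 1);
	puts("  no recursion");
	return 0;
}

Running the model, the traced variant immediately re-enters the percpu helper
from the tracing hook, while the notrace variant completes without re-entry,
which mirrors the effect of the substitutions in the diff below.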
arch/s390/include/asm/percpu.h

index 6d6556ca24aa2bd4cc2e8b1f566fa39da811fdc9..f715419a72cf01a781e8736ebab37a8e758926e5 100644
@@ -28,7 +28,7 @@
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ old__, new__, prev__;                                \
        pcp_op_T__ *ptr__;                                              \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        prev__ = *ptr__;                                                \
        do {                                                            \
@@ -36,7 +36,7 @@
                new__ = old__ op (val);                                 \
                prev__ = cmpxchg(ptr__, old__, new__);                  \
        } while (prev__ != old__);                                      \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        new__;                                                          \
 })
 
@@ -67,7 +67,7 @@
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ val__ = (val);                                       \
        pcp_op_T__ old__, *ptr__;                                       \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                            \
        if (__builtin_constant_p(val__) &&                              \
            ((szcast)val__ > -129) && ((szcast)val__ < 128)) {          \
@@ -83,7 +83,7 @@
                        : [val__] "d" (val__)                           \
                        : "cc");                                        \
        }                                                               \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
 }
 
 #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ val__ = (val);                                       \
        pcp_op_T__ old__, *ptr__;                                       \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        asm volatile(                                                   \
                op "    %[old__],%[val__],%[ptr__]\n"                   \
                : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
                : [val__] "d" (val__)                                   \
                : "cc");                                                \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        old__ + val__;                                                  \
 })
 
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ val__ = (val);                                       \
        pcp_op_T__ old__, *ptr__;                                       \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        asm volatile(                                                   \
                op "    %[old__],%[val__],%[ptr__]\n"                   \
                : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
                : [val__] "d" (val__)                                   \
                : "cc");                                                \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
 }
 
 #define this_cpu_and_4(pcp, val)       arch_this_cpu_to_op(pcp, val, "lan")
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ ret__;                                               \
        pcp_op_T__ *ptr__;                                              \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        ret__ = cmpxchg(ptr__, oval, nval);                             \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        ret__;                                                          \
 })
 
 ({                                                                     \
        typeof(pcp) *ptr__;                                             \
        typeof(pcp) ret__;                                              \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        ret__ = xchg(ptr__, nval);                                      \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        ret__;                                                          \
 })
 
        typeof(pcp1) *p1__;                                             \
        typeof(pcp2) *p2__;                                             \
        int ret__;                                                      \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        p1__ = raw_cpu_ptr(&(pcp1));                                    \
        p2__ = raw_cpu_ptr(&(pcp2));                                    \
        ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__);   \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        ret__;                                                          \
 })