mm: kfence: fix PG_slab and memcg_data clearing
author    Muchun Song <songmuchun@bytedance.com>
          Mon, 20 Mar 2023 03:00:59 +0000 (11:00 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 13 Apr 2023 14:55:30 +0000 (16:55 +0200)
commit 3ee2d7471fa4963a2ced0a84f0653ce88b43c5b2 upstream.

KFENCE does not reset PG_slab and memcg_data when it fails to initialize the
kfence pool at runtime, so a "Bad page state" report is triggered when the
kfence pool is freed back to the buddy allocator.  Fix this by clearing both
in the failure path of kfence_init_pool().

The check for a compound head page is unnecessary, since we already guarantee
this when allocating the kfence pool.  Remove the check to simplify the code.

Link: https://lkml.kernel.org/r/20230320030059.20189-1-songmuchun@bytedance.com
Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: SeongJae Park <sjpark@amazon.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
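
For context, a rough sketch of the failure mode: when the partially initialized
pool is handed back to the buddy allocator, the free path rejects pages whose
flags or memcg_data are not clean and logs a "Bad page state" report.  The
helper below is illustrative only and is not the actual mm/page_alloc.c code;
PG_slab, PAGE_FLAGS_CHECK_AT_FREE and page->memcg_data are real, the function
name is made up.

/*
 * Illustrative stand-in for the sanity checks the page allocator performs
 * on a page being freed.  If KFENCE leaves PG_slab set or memcg_data
 * non-zero on its pool pages, checks of this kind fail and the kernel
 * reports "Bad page state".
 */
static bool page_state_is_clean(struct page *page)
{
        /* PG_slab is among the flags that must be clear at free time. */
        if (page->flags & PAGE_FLAGS_CHECK_AT_FREE)
                return false;
#ifdef CONFIG_MEMCG
        /* Non-zero memcg_data makes the page look still charged to a cgroup. */
        if (page->memcg_data)
                return false;
#endif
        return true;
}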
mm/kfence/core.c

index c3d04753806a291e44823f57053ed5feef1ff1a2..5e4c1abc794f810a21e28d6af5128a89b74c484f 100644
@@ -562,10 +562,6 @@ static unsigned long kfence_init_pool(void)
                if (!i || (i % 2))
                        continue;
 
-               /* Verify we do not have a compound head page. */
-               if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
-                       return addr;
-
                __folio_set_slab(slab_folio(slab));
 #ifdef CONFIG_MEMCG
                slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
@@ -598,12 +594,26 @@ static unsigned long kfence_init_pool(void)
 
                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-                       return addr;
+                       goto reset_slab;
 
                addr += 2 * PAGE_SIZE;
        }
 
        return 0;
+
+reset_slab:
+       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+               struct slab *slab = page_slab(&pages[i]);
+
+               if (!i || (i % 2))
+                       continue;
+#ifdef CONFIG_MEMCG
+               slab->memcg_data = 0;
+#endif
+               __folio_clear_slab(slab_folio(slab));
+       }
+
+       return addr;
 }
 
 static bool __init kfence_init_pool_early(void)
@@ -633,16 +643,6 @@ static bool __init kfence_init_pool_early(void)
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
-       for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
-               struct slab *slab = virt_to_slab(p);
-
-               if (!slab)
-                       continue;
-#ifdef CONFIG_MEMCG
-               slab->memcg_data = 0;
-#endif
-               __folio_clear_slab(slab_folio(slab));
-       }
        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;
        return false;
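
Taken together, the hunks leave the failure handling of kfence_init_pool()
reading roughly as below (reassembled from the context and added lines above,
with the per-page setup loop and unrelated code elided).  Because the cleanup
now lives in kfence_init_pool() itself, the open-coded loop in
kfence_init_pool_early() becomes redundant and is removed.

                /* ... per-page setup; bail out on the first protection failure ... */

                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
                        goto reset_slab;

                addr += 2 * PAGE_SIZE;
        }

        return 0;

reset_slab:
        /* Undo the __folio_set_slab()/memcg_data marking done during setup. */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                struct slab *slab = page_slab(&pages[i]);

                if (!i || (i % 2))
                        continue;
#ifdef CONFIG_MEMCG
                slab->memcg_data = 0;
#endif
                __folio_clear_slab(slab_folio(slab));
        }

        return addr;
}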