author     Tao Zeng <tao.zeng@amlogic.com>    2019-08-28 07:25:40 (GMT)
committer  Shen Liu <shen.liu@amlogic.com>    2020-07-22 08:52:41 (GMT)
commit     66a94b3c93135a8b09e10bbb5ba06ff8eeb2889f (patch)
tree       d1832feccb41a02fe9cc698b90090e604ecf131d
parent     da25c8270a12d23ea9b02f86d5dda212d21f2d3f (diff)
mm: fix wrong kasan report [1/1]
PD#SWPL-13281

Problem:
Two kinds of wrong KASAN reports appear after merging the "save wasted
slab" change:
1. slab-out-of-bounds, caused by krealloc() setting shadow memory out of
   range, because the tail pages of the allocation were already freed.
2. use-after-free, caused by kasan_free_pages() being called again after
   a page has been freed. The function is already called on the
   __free_pages() path, so the shadow memory gets marked as freed twice.

Solution:
1. Keep the shadow in range when a tail page was freed and the
   allocation is krealloc()'d again.
2. Remove the redundant call of kasan_free_pages().

Verify:
X301

Change-Id: Ib5bdcbb618a783920009bb97d112c361888b0d7c
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
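To make problem 1 concrete, below is a stand-alone sketch of the redzone
arithmetic (plain userspace C; the 4 KB page size, 9000-byte request and
order-2 allocation are made-up illustrative values, not taken from the
report):

/* Why poisoning up to the compound-page end goes wrong once the tail
 * pages have been returned to the buddy allocator.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size  = 9000;	/* hypothetical kmalloc request      */
	unsigned int  order = 2;	/* get_order(9000) == 2, i.e. 4 pages */

	unsigned long old_end = PAGE_SIZE << order;	/* 16384: compound end */
	unsigned long new_end = PAGE_ALIGN(size);	/* 12288: owned pages  */

	/*
	 * The "save wasted slab" change gives the 4th page back to the buddy
	 * allocator, so poisoning shadow up to old_end rewrites shadow bytes
	 * that now belong to whoever reallocated that page; KASAN then
	 * reports a bogus slab-out-of-bounds there.  Clamping to new_end, as
	 * the mm/kasan/kasan.c hunk below does, keeps the redzone inside the
	 * pages this allocation still owns.
	 */
	printf("old redzone end: ptr + %lu\n", old_end);
	printf("new redzone end: ptr + %lu\n", new_end);
	return 0;
}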
Diffstat
-rw-r--r--  drivers/amlogic/memory_ext/Kconfig |  1
-rw-r--r--  mm/kasan/kasan.c                   |  5
-rw-r--r--  mm/page_alloc.c                    | 19
-rw-r--r--  mm/slub.c                          |  1
4 files changed, 24 insertions, 2 deletions
diff --git a/drivers/amlogic/memory_ext/Kconfig b/drivers/amlogic/memory_ext/Kconfig
index d0344f4..449218d 100644
--- a/drivers/amlogic/memory_ext/Kconfig
+++ b/drivers/amlogic/memory_ext/Kconfig
@@ -42,7 +42,6 @@ config AMLOGIC_CMA
 config AMLOGIC_VMAP
 	bool "Amlogic kernel stack"
 	depends on AMLOGIC_MEMORY_EXTEND
-	depends on !KASAN
 	depends on 64BIT
 	default y
 	help
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 8622541..fc8b263 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -626,6 +626,11 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
 	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+	if (PageOwnerPriv1(page)) { /* end of this page was freed */
+		redzone_end = (unsigned long)ptr + PAGE_ALIGN(size);
+	}
+#endif
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
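For reference, KASAN maps every 8 bytes of memory onto one shadow byte, so
the clamp above also bounds how many shadow bytes get written; a minimal
sketch using the same illustrative one-page freed tail as above:

/* 8:1 shadow mapping sketch (userspace C, made-up sizes). */
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3	/* 1 shadow byte per 8 bytes */

int main(void)
{
	unsigned long freed_tail = 4096;	/* bytes no longer owned */

	/* Without the clamp, this many shadow bytes of the freed page would
	 * be rewritten as redzone by kasan_kmalloc_large().
	 */
	printf("shadow bytes poisoned past the owned region: %lu\n",
	       freed_tail >> KASAN_SHADOW_SCALE_SHIFT);	/* 512 */
	return 0;
}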
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d30cb0c..40dde36 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -931,7 +931,17 @@ done_merging:
 		}
 	}
 
+#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN)
+	/*
+	 * always put freed page to tail of buddy system, in
+	 * order to increase probability of use-after-free
+	 * for KASAN check.
+	 */
+	list_add_tail(&page->lru,
+		      &zone->free_area[order].free_list[migratetype]);
+#else
 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+#endif
 out:
 	zone->free_area[order].nr_free++;
 #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
@@ -2622,10 +2632,19 @@ void free_hot_cold_page(struct page *page, bool cold)
 	}
 
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN)
+	/*
+	 * always put freed page to tail of buddy system, in
+	 * order to increase probability of use-after-free
+	 * for KASAN check.
+	 */
+	list_add_tail(&page->lru, &pcp->lists[migratetype]);
+#else
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
 	else
 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
+#endif
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
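The reasoning in the comment is that the buddy and per-cpu allocators
normally hand out pages from the head of their free lists, so a page freed
to the tail stays out of circulation, with its shadow still poisoned, for
as long as possible. A stand-alone sketch of the list_add()/list_add_tail()
difference, using a minimal reimplementation of the kernel list helpers
(userspace C, hypothetical page frame numbers):

#include <stdio.h>
#include <stddef.h>

/* Minimal copy of the kernel's circular doubly-linked list helpers. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next  = next;
	new->prev  = prev;
	prev->next = new;
}

static void list_add(struct list_head *new, struct list_head *head)
{ __list_add(new, head, head->next); }		/* insert at head */

static void list_add_tail(struct list_head *new, struct list_head *head)
{ __list_add(new, head->prev, head); }		/* insert at tail */

struct page { unsigned long pfn; struct list_head lru; };

#define first_page(head) \
	((struct page *)((char *)(head)->next - offsetof(struct page, lru)))

int main(void)
{
	struct list_head free_list;
	struct page idle1 = { .pfn = 100 }, idle2 = { .pfn = 101 };
	struct page freed = { .pfn = 999 };	/* page just freed under KASAN */

	INIT_LIST_HEAD(&free_list);
	list_add_tail(&idle1.lru, &free_list);
	list_add_tail(&idle2.lru, &free_list);

	/*
	 * With list_add() the freed page would sit at the head and be the
	 * next one handed out; list_add_tail() (the choice above when KASAN
	 * is enabled) recycles it last, so a stale pointer to it keeps
	 * hitting poisoned shadow for longer.
	 */
	list_add_tail(&freed.lru, &free_list);

	printf("next page handed out: pfn %lu\n", first_page(&free_list)->pfn);
	return 0;
}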
diff --git a/mm/slub.c b/mm/slub.c
index 59ec527..7dfc2a4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3734,7 +3734,6 @@ static void aml_slub_free_large(struct page *page, const void *obj)
 			 __func__, page_address(page), nr_pages, obj);
 	for (i = 0; i < nr_pages; i++) {
 		__free_pages(page, 0);
-		kasan_free_pages(page, 0);
 		page++;
 	}
 }
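The removal is safe because the order-0 free path already poisons the
shadow. In this kernel the chain is roughly (simplified, function names
from mm/page_alloc.c):

    __free_pages(page, 0)
      -> free_hot_cold_page(page, false)
           -> free_pcp_prepare(page)
                -> free_pages_prepare(page, 0, ...)
                     -> kasan_free_pages(page, 0)

So the second, explicit kasan_free_pages() call touched a page that the
allocator may already have handed to a new owner, which KASAN then
misreported as a use-after-free.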