author | Tao Zeng <tao.zeng@amlogic.com> | 2019-08-28 07:25:40 (GMT) |
---|---|---|
committer | Jianxin Pan <jianxin.pan@amlogic.com> | 2019-08-29 06:36:43 (GMT) |
commit | becb83999e19d2055458f08a2b7a44bd1170853e (patch) | |
tree | 36356830d39974b3d21f579a1d49723fb54e3c3d | |
parent | 6a108be69d58805c6bc426f78e645d8abb7d5cf8 (diff) | |
download | common-becb83999e19d2055458f08a2b7a44bd1170853e.zip common-becb83999e19d2055458f08a2b7a44bd1170853e.tar.gz common-becb83999e19d2055458f08a2b7a44bd1170853e.tar.bz2 |
mm: fix wrong kasan report [1/1]
PD#SWPL-13281
Problem:
There are 2 types of wrong KASAN reports after merging the change
that saves wasted slab memory.
1, slab-out-of-bounds, which is caused by krealloc setting shadow
memory out of range, since the tail of the page was freed.
2, use-after-free, which is caused by kasan_free_pages being called
after a page was freed. Because this function is already called in
free_pages, it marked the shadow memory twice.
Solution:
1, ensure the shadow memory does not go out of range if a tail page
was freed and then realloc'd again.
2, remove the redundant call of kasan_free_pages.
Verify:
X301
Change-Id: Ib5bdcbb618a783920009bb97d112c361888b0d7c
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
-rw-r--r-- | drivers/amlogic/memory_ext/Kconfig | 1 | ||||
-rw-r--r-- | mm/kasan/kasan.c | 5 | ||||
-rw-r--r-- | mm/page_alloc.c | 19 | ||||
-rw-r--r-- | mm/slub.c | 1 |
4 files changed, 24 insertions, 2 deletions
diff --git a/drivers/amlogic/memory_ext/Kconfig b/drivers/amlogic/memory_ext/Kconfig index 8bb640e..0da0422 100644 --- a/drivers/amlogic/memory_ext/Kconfig +++ b/drivers/amlogic/memory_ext/Kconfig @@ -55,7 +55,6 @@ config AMLOGIC_KASAN32 config AMLOGIC_VMAP bool "Amlogic kernel stack" depends on AMLOGIC_MEMORY_EXTEND - depends on !KASAN default y help This config is used to enable amlogic kernel stack diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 35408ec..55cd682 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -641,6 +641,11 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) redzone_start = round_up((unsigned long)(ptr + size), KASAN_SHADOW_SCALE_SIZE); redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page)); +#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND + if (PageOwnerPriv1(page)) { /* end of this page was freed */ + redzone_end = (unsigned long)ptr + PAGE_ALIGN(size); + } +#endif kasan_unpoison_shadow(ptr, size); kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b453904..9c45a55 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -931,7 +931,17 @@ done_merging: } } +#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN) + /* + * always put freed page to tail of buddy system, in + * order to increase probability of use-after-free + * for KASAN check. + */ + list_add_tail(&page->lru, + &zone->free_area[order].free_list[migratetype]); +#else list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); +#endif out: zone->free_area[order].nr_free++; #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND @@ -2622,10 +2632,19 @@ void free_hot_cold_page(struct page *page, bool cold) } pcp = &this_cpu_ptr(zone->pageset)->pcp; +#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN) + /* + * always put freed page to tail of buddy system, in + * order to increase probability of use-after-free + * for KASAN check. 
+ */ + list_add_tail(&page->lru, &pcp->lists[migratetype]); +#else if (!cold) list_add(&page->lru, &pcp->lists[migratetype]); else list_add_tail(&page->lru, &pcp->lists[migratetype]); +#endif pcp->count++; if (pcp->count >= pcp->high) { unsigned long batch = READ_ONCE(pcp->batch); @@ -3734,7 +3734,6 @@ static void aml_slub_free_large(struct page *page, const void *obj) __func__, page_address(page), nr_pages, obj); for (i = 0; i < nr_pages; i++) { __free_pages(page, 0); - kasan_free_pages(page, 0); page++; } } |