author    Tao Zeng <tao.zeng@amlogic.com>	2019-04-16 01:42:24 (GMT)
committer Tao Zeng <tao.zeng@amlogic.com>	2019-04-18 07:10:08 (GMT)
commit    3bed10b47f032e12d14cd65704a4c8d737192d82 (patch)
tree      04cc00d2b017ad5176fa09a8220bd9b60765e388
parent    bdd35d785985d4a6be283f1d85d70d1755304643 (diff)
kasan: bring up KASAN for 32bit os [2/2]
PD#SWPL-7085

Problem:
KASAN currently can't be used on a 32bit kernel, which makes it
difficult to debug memory problems.

Solution:
Bring up KASAN on the 32bit kernel.

Verify: p212

Change-Id: I4d80568f023315994e969c79b786eba856177c9c
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
Diffstat
-rw-r--r--  MAINTAINERS                          |   5
-rw-r--r--  arch/arm/Kconfig                     |   2
-rw-r--r--  arch/arm/Makefile                    |   4
-rw-r--r--  arch/arm/boot/compressed/Makefile    |   2
-rw-r--r--  arch/arm/boot/compressed/string.c    |   2
-rw-r--r--  arch/arm/include/asm/highmem.h       |   5
-rw-r--r--  arch/arm/include/asm/kasan.h         |  66
-rw-r--r--  arch/arm/include/asm/memory.h        |  28
-rw-r--r--  arch/arm/include/asm/pgtable.h       |   2
-rw-r--r--  arch/arm/include/asm/string.h        |  19
-rw-r--r--  arch/arm/include/asm/thread_info.h   |   6
-rw-r--r--  arch/arm/include/asm/uaccess.h       |  15
-rw-r--r--  arch/arm/kernel/armksyms.c           |   6
-rw-r--r--  arch/arm/kernel/head-common.S        |   3
-rw-r--r--  arch/arm/kernel/module.c             |  18
-rw-r--r--  arch/arm/kernel/setup.c              |   6
-rw-r--r--  arch/arm/kernel/unwind.c             |  19
-rw-r--r--  arch/arm/lib/memcpy.S                |   8
-rw-r--r--  arch/arm/lib/memmove.S               |   8
-rw-r--r--  arch/arm/lib/memset.S                |   8
-rw-r--r--  arch/arm/mm/Makefile                 |   3
-rw-r--r--  arch/arm/mm/init.c                   |   9
-rw-r--r--  arch/arm/mm/kasan_init.c             | 218
-rw-r--r--  arch/arm/mm/mmu.c                    |  38
-rw-r--r--  arch/arm/vdso/Makefile               |   2
-rw-r--r--  drivers/amlogic/memory_ext/Kconfig   |  13
-rw-r--r--  drivers/of/of_reserved_mem.c         |  12
-rw-r--r--  mm/kasan/kasan.c                     |  20
28 files changed, 543 insertions(+), 4 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 754bd64..1ad3a7e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13541,6 +13541,11 @@ AMLOGIC boot config for M8B
M: Tao Zeng <tao.zeng@amlogic.com>
F: arch/arm/mach-meson/Makefile.boot
+AMLOGIC implementation for 32bit kasan
+M: Tao Zeng <tao.zeng@amlogic.com>
+F: arch/arm/include/asm/kasan.h
+F: arch/arm/mm/kasan_init.c
+
HDMITX OUTPUT DRIVER
M: Yi Zhou <yi.zhou@amlogic.com>
M: Zongdong Jiao <zongdong.jiao@amlogic.com>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0bd89d9..da92f87 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -90,6 +90,7 @@ config ARM
select PERF_USE_VMALLOC
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
+ select HAVE_ARCH_KASAN
# Above selects are sorted alphabetically; please add new ones
# according to that. Thanks.
help
@@ -1444,6 +1445,7 @@ config PAGE_OFFSET
default 0x40000000 if VMSPLIT_1G
default 0x80000000 if VMSPLIT_2G
default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0xD0000000 if AMLOGIC_KASAN32
default 0xC0000000
config NR_CPUS
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 936ce8d..62732af 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -52,6 +52,10 @@ AS += -EL
LD += -EL
endif
+ifeq ($(CONFIG_KASAN),y)
+KASAN_SHADOW_OFFSET := 0xA0000000
+endif
+
#
# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
# later may result in code being generated that handles signed short and signed
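
The shadow offset chosen above is what the compiler bakes into its inline checks (scripts/Makefile.kasan passes it through -fasan-shadow-offset). As a rough illustration only, not actual compiler output, the check emitted before an 8-byte load behaves like this C sketch; __asan_report_load8_noabort is the KASAN runtime's report hook:

/* sketch of the compiler-inserted check for an 8-byte load,
 * assuming this port's shadow offset of 0xA0000000 */
static inline void kasan_check_load8(const void *p)
{
	/* one shadow byte covers an 8-byte granule */
	s8 shadow = *(s8 *)(((unsigned long)p >> 3) + 0xA0000000);

	/* non-zero shadow: granule is partially valid or poisoned */
	if (unlikely(shadow))
		__asan_report_load8_noabort((unsigned long)p);
}
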
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index d50430c..654dcb9 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -4,6 +4,8 @@
# create a compressed vmlinuz image from the original vmlinux
#
+KASAN_SANITIZE := n
+
OBJS =
AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
diff --git a/arch/arm/boot/compressed/string.c b/arch/arm/boot/compressed/string.c
index 6894674..d7b27c5 100644
--- a/arch/arm/boot/compressed/string.c
+++ b/arch/arm/boot/compressed/string.c
@@ -120,7 +120,9 @@ char *strchr(const char *s, int c)
return (char *)s;
}
+#ifndef CONFIG_AMLOGIC_KASAN32 /* for compile problems */
#undef memset
+#endif
void *memset(void *s, int c, size_t count)
{
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 0a0e2d1..e3ff8a8 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -3,7 +3,12 @@
#include <asm/kmap_types.h>
+#ifdef CONFIG_AMLOGIC_KASAN32
+#include <asm/memory.h>
+#define PKMAP_BASE VMALLOC_END
+#else
#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
+#endif
#define LAST_PKMAP PTRS_PER_PTE
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
new file mode 100644
index 0000000..7b86cb0
--- /dev/null
+++ b/arch/arm/include/asm/kasan.h
@@ -0,0 +1,66 @@
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/linkage.h>
+#include <asm/memory.h>
+
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+#define PAGE_KERNEL_RO _MOD_PROT(pgprot_kernel, L_PTE_RDONLY)
+
+/*
+ * KASAN_SHADOW_START: beginning of the shadow memory region.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of the 1GB kernel
+ * address space.
+ *
+ * For 32bit KASAN we use a fixed memory map:
+ *
+ * 0x00000000 +--------+
+ *            |        |
+ *            |        | User space memory, 2944MB
+ *            |        |
+ * 0xB8000000 +--------+
+ *            |        | KASAN shadow memory, 128MB
+ * 0xC0000000 +--------+
+ *            |        | vmalloc space, 244MB
+ * 0xCF400000 +--------+ PKMap, for kmap, 2MB
+ * 0xCF600000 +--------+ Module space, 10MB
+ * 0xD0000000 +--------+
+ *            |        | Kernel linear mapped space, 762MB
+ * 0xFFA00000 +--------+ Static map, 2MB
+ * 0xFFC00000 +--------+ Fixed map, for kmap_atomic, 3MB
+ * 0xFFF00000 +--------+
+ * 0xFFFF0000 +--------+ High vectors, 4KB
+ *
+ */
+#define KADDR_SIZE (SZ_1G)
+#define KASAN_SHADOW_SIZE (KADDR_SIZE >> 3)
+#define KASAN_SHADOW_START (TASK_SIZE)
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+/*
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ */
+#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_START - (VMALLOC_START >> 3))
+struct map_desc;
+void kasan_init(void);
+void kasan_copy_shadow(pgd_t *pgdir);
+asmlinkage void kasan_early_init(void);
+void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+void create_mapping(struct map_desc *md);
+#else
+static inline void kasan_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *pgdir) { }
+#endif
+
+#endif
+#endif
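
A quick way to sanity-check these constants is to evaluate the mapping formula at the layout's landmarks. The sketch below is a standalone userspace program (names mirror the header; values are this port's):

#include <stdio.h>

/* constants from the header above */
#define PAGE_OFFSET	0xD0000000UL
#define VMALLOC_START	0xC0000000UL
#define SHADOW_START	0xB8000000UL
#define SHADOW_OFFSET	(SHADOW_START - (VMALLOC_START >> 3))

int main(void)
{
	/* shadow_addr = (addr >> 3) + KASAN_SHADOW_OFFSET */
	printf("offset                = %#lx\n", SHADOW_OFFSET);	/* 0xa0000000 */
	printf("shadow(vmalloc start) = %#lx\n",
	       (VMALLOC_START >> 3) + SHADOW_OFFSET);		/* 0xb8000000 */
	printf("shadow(page offset)   = %#lx\n",
	       (PAGE_OFFSET >> 3) + SHADOW_OFFSET);		/* 0xba000000 */
	return 0;
}
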
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 508cee7..f033759 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -39,15 +39,22 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_64M))
-#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
-#else /* CONFIG_AMLOGIC_VMAP */
+#elif defined(CONFIG_AMLOGIC_KASAN32)
+/*
+ * reserve 128MB address space for kasan
+ * for this memory layout implementation, PAGE_OFFSET should be 0xD0000000
+ */
+#define VMALLOC_START (UL(CONFIG_PAGE_OFFSET) - UL(SZ_256M))
+#define TASK_SIZE (VMALLOC_START - UL(SZ_128M))
+#define KMEM_END (0xffa00000UL)
+#else
/*
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
-#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
#endif /* CONFIG_AMLOGIC_VMAP */
+#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
* The maximum size of a 26-bit user space task.
@@ -66,13 +73,24 @@
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
#ifndef CONFIG_THUMB2_KERNEL
+#ifdef CONFIG_AMLOGIC_KASAN32
+/*
+ * to fix a module linking problem
+ */
+#define MODULES_VADDR (PAGE_OFFSET - SZ_16M + SZ_4M + SZ_2M)
+#else
#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
+#endif
#else
/* smaller range for Thumb-2 symbols relocation (2^24)*/
#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
#endif
#endif /* CONFIG_AMLOGIC_VMAP */
+#ifdef CONFIG_AMLOGIC_KASAN32
+#define VMALLOC_END (MODULES_VADDR - SZ_2M)
+#endif
+
#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif
@@ -80,11 +98,15 @@
/*
* The highmem pkmap virtual space shares the end of the module area.
*/
+#ifdef CONFIG_AMLOGIC_KASAN32
+#define MODULES_END (PAGE_OFFSET)
+#else
#ifdef CONFIG_HIGHMEM
#define MODULES_END (PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END (PAGE_OFFSET)
#endif
+#endif
/*
* The XIP kernel gets mapped at the bottom of the module vm area.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a8d656d..b4fe2f6 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -41,9 +41,11 @@
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
+#ifndef CONFIG_AMLOGIC_KASAN32
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END 0xff800000UL
+#endif /* !CONFIG_AMLOGIC_KASAN32 */
#define LIBRARY_TEXT_START 0x0c000000
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index cf4f3aa..8b9e902 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -26,6 +26,24 @@ extern void * memset(void *, int, __kernel_size_t);
extern void __memzero(void *ptr, __kernel_size_t n);
+#ifdef CONFIG_AMLOGIC_KASAN32
+/* replace the default functions with KASAN-checked versions */
+extern void *__memcpy(void *dst, const void *src, __kernel_size_t size);
+extern void *__memmove(void *dst, const void *src, __kernel_size_t size);
+extern void *__memset(void *dst, int v, __kernel_size_t size);
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the not-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
+#else
#define memset(p,v,n) \
({ \
void *__p = (p); size_t __n = n; \
@@ -39,3 +57,4 @@ extern void __memzero(void *ptr, __kernel_size_t n);
})
#endif
+#endif
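
These __-prefixed symbols are the raw assembly routines; the exported memcpy/memmove/memset then become instrumented wrappers. Roughly following the upstream mm/kasan/kasan.c pattern (a sketch, not the exact Amlogic code), the wrapper validates both ranges against the shadow before delegating:

void *memcpy(void *dest, const void *src, size_t len)
{
	/* check_memory_region() reports if the shadow marks a range invalid */
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}
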
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index fa89009..557bd65 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -16,8 +16,14 @@
#include <asm/fpstate.h>
#include <asm/page.h>
+#ifdef CONFIG_AMLOGIC_KASAN32
+#define THREAD_SIZE_ORDER 2
+#else
#define THREAD_SIZE_ORDER 1
+#endif /* CONFIG_AMLOGIC_KASAN32 */
+
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
#ifdef CONFIG_AMLOGIC_VMAP
#define THREAD_INFO_SIZE (sizeof(struct thread_info))
#define THREAD_INFO_OFFSET (THREAD_SIZE - THREAD_INFO_SIZE)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index b7e0125..576742c 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -18,6 +18,9 @@
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>
+#ifdef CONFIG_AMLOGIC_KASAN32
+#include <linux/kasan-checks.h>
+#endif
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
@@ -532,6 +535,9 @@ __clear_user(void __user *addr, unsigned long n)
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+#ifdef CONFIG_AMLOGIC_KASAN32
+ kasan_check_write(to, n);
+#endif
check_object_size(to, n, false);
return __arch_copy_from_user(to, from, n);
}
@@ -540,6 +546,9 @@ static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
+#ifdef CONFIG_AMLOGIC_KASAN32
+ kasan_check_write(to, n);
+#endif
check_object_size(to, n, false);
@@ -553,6 +562,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
+#ifdef CONFIG_AMLOGIC_KASAN32
+ kasan_check_read(from, n);
+#endif
check_object_size(from, n, true);
return __arch_copy_to_user(to, from, n);
@@ -561,6 +573,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
+#ifdef CONFIG_AMLOGIC_KASAN32
+ kasan_check_read(from, n);
+#endif
check_object_size(from, n, true);
if (access_ok(VERIFY_WRITE, to, n))
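
With these hooks, a uaccess call with an out-of-bounds kernel buffer is reported before the copy runs. A hypothetical example (demo_read and the sizes are illustrative only):

static long demo_read(const char __user *ubuf)
{
	char buf[8];

	/* 16 > sizeof(buf): kasan_check_write() reports a
	 * stack-out-of-bounds on 'buf' before the copy happens */
	return copy_from_user(buf, ubuf, 16);
}
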
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 8e8d20c..dfadaff 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -181,3 +181,9 @@ EXPORT_SYMBOL(__pv_offset);
EXPORT_SYMBOL(__arm_smccc_smc);
EXPORT_SYMBOL(__arm_smccc_hvc);
#endif
+
+#ifdef CONFIG_AMLOGIC_KASAN32
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+#endif
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index f823604..a927f8a 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -101,6 +101,9 @@ __mmap_switched:
str r2, [r6] @ Save atags pointer
cmp r7, #0
strne r0, [r7] @ Save control register values
+#ifdef CONFIG_AMLOGIC_KASAN32
+ bl kasan_early_init
+#endif
b start_kernel
ENDPROC(__mmap_switched)
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 4f14b5c..cb235c9 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -25,6 +25,9 @@
#include <asm/smp_plat.h>
#include <asm/unwind.h>
#include <asm/opcodes.h>
+#ifdef CONFIG_AMLOGIC_KASAN32
+#include <linux/kasan.h>
+#endif
#ifdef CONFIG_XIP_KERNEL
/*
@@ -40,6 +43,20 @@
#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
+#ifdef CONFIG_AMLOGIC_KASAN32
+ void *p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR,
+ MODULES_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ NUMA_NO_NODE, __builtin_return_address(0));
+ if (!p)
+ p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
+ VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ NUMA_NO_NODE, __builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+ return NULL;
+ }
+ return p;
+#else
void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
@@ -48,6 +65,7 @@ void *module_alloc(unsigned long size)
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
+#endif
}
#endif
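
kasan_module_alloc() backs the freshly allocated module region with writable shadow pages (the early zero-page shadow is read-only). A simplified sketch of what the upstream helper does, under the assumption it matches this 4.9-era tree:

int kasan_module_alloc(void *addr, size_t size)
{
	/* one shadow byte per 8 bytes, rounded up to whole pages */
	size_t shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
				      PAGE_SIZE);
	unsigned long shadow_start =
		(unsigned long)kasan_mem_to_shadow(addr);
	void *ret;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
				   shadow_start + shadow_size,
				   GFP_KERNEL | __GFP_ZERO,
				   PAGE_KERNEL, VM_NO_GUARD,
				   NUMA_NO_NODE,
				   __builtin_return_address(0));

	return ret ? 0 : -ENOMEM;
}
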
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 2b5ddfa..02bbf72 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -68,6 +68,9 @@
#ifdef CONFIG_AMLOGIC_CPU_INFO
#include <linux/amlogic/cpu_version.h>
#endif
+#ifdef CONFIG_AMLOGIC_KASAN32
+#include <asm/kasan.h>
+#endif
#include "atags.h"
@@ -1127,6 +1130,9 @@ void __init setup_arch(char **cmdline_p)
early_ioremap_reset();
paging_init(mdesc);
+#ifdef CONFIG_AMLOGIC_KASAN32
+ kasan_init();
+#endif
request_standard_resources(mdesc);
if (mdesc->restart)
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 98f8fba..4682dd2 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -246,8 +246,21 @@ static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
}
/* Before popping a register, check whether it is feasible or not */
+#ifdef CONFIG_AMLOGIC_KASAN32
+/*
+ * If KASAN is enabled and unwind_frame() is called from an IRQ
+ * routine, a bogus KASAN report can trigger: the IRQ borrows the
+ * interrupted thread's stack, and irq_svc does not initialize
+ * shadow memory when saving the IRQ context. Since the compiler
+ * can't be told to reserve shadow space for this stack usage, just
+ * tell it not to sanitize this function.
+ */
+int __no_sanitize_address unwind_pop_register(struct unwind_ctrl_block *ctrl,
+ unsigned long **vsp, unsigned int reg)
+#else
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
unsigned long **vsp, unsigned int reg)
+#endif
{
if (unlikely(ctrl->check_each_pop))
if (*vsp >= (unsigned long *)ctrl->sp_high)
@@ -407,7 +420,13 @@ int unwind_frame(struct stackframe *frame)
idx = unwind_find_idx(frame->pc);
if (!idx) {
+ #ifdef CONFIG_AMLOGIC_KASAN32
+	/* avoid closed-source modules printing this warning too often */
+ if (frame->pc > PAGE_OFFSET)
+ pr_warn("unwind: Index not found %08lx\n", frame->pc);
+ #else
pr_warn("unwind: Index not found %08lx\n", frame->pc);
+ #endif
return -URC_FAILURE;
}
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 64111bd..044d499 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -62,9 +62,17 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
ENTRY(mmiocpy)
+#ifdef CONFIG_AMLOGIC_KASAN32
+ENTRY(__memcpy)
+#else
ENTRY(memcpy)
+#endif
#include "copy_template.S"
+#ifdef CONFIG_AMLOGIC_KASAN32
+ENDPROC(__memcpy)
+#else
ENDPROC(memcpy)
+#endif
ENDPROC(mmiocpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 69a9d47..d508fe8 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -27,7 +27,11 @@
* occurring in the opposite direction.
*/
+#ifdef CONFIG_AMLOGIC_KASAN32
+ENTRY(__memmove)
+#else
ENTRY(memmove)
+#endif
UNWIND( .fnstart )
subs ip, r0, r1
@@ -224,4 +228,8 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
+#ifdef CONFIG_AMLOGIC_KASAN32
+ENDPROC(__memmove)
+#else
ENDPROC(memmove)
+#endif
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 3c65e3b..5d23be8 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -17,7 +17,11 @@
.align 5
ENTRY(mmioset)
+#ifdef CONFIG_AMLOGIC_KASAN32
+ENTRY(__memset)
+#else
ENTRY(memset)
+#endif
UNWIND( .fnstart )
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
@@ -133,5 +137,9 @@ UNWIND( .fnstart )
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
UNWIND( .fnend )
+#ifdef CONFIG_AMLOGIC_KASAN32
+ENDPROC(__memset)
+#else
ENDPROC(memset)
+#endif
ENDPROC(mmioset)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index e869824..6d264aa 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -107,3 +107,6 @@ obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
obj-$(CONFIG_CACHE_UNIPHIER) += cache-uniphier.o
+
+obj-$(CONFIG_AMLOGIC_KASAN32) += kasan_init.o
+KASAN_SANITIZE_kasan_init.o := n
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4c587ad..2327e56 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -36,6 +36,9 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#ifdef CONFIG_AMLOGIC_KASAN32
+#include <asm/kasan.h>
+#endif
#include "mm.h"
@@ -502,6 +505,9 @@ void __init mem_init(void)
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
pr_notice("Virtual kernel memory layout:\n"
+#ifdef CONFIG_AMLOGIC_KASAN32
+ " kasan : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
@@ -520,6 +526,9 @@ void __init mem_init(void)
" .init : 0x%p" " - 0x%p" " (%4td kB)\n"
" .data : 0x%p" " - 0x%p" " (%4td kB)\n"
" .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
+#ifdef CONFIG_AMLOGIC_KASAN32
+ MLM(KASAN_SHADOW_START, KASAN_SHADOW_END),
+#endif
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
new file mode 100644
index 0000000..ac26904
--- /dev/null
+++ b/arch/arm/mm/kasan_init.c
@@ -0,0 +1,218 @@
+/*
+ * This file contains KASAN initialization code for 32bit ARM,
+ * adapted from the arm64 implementation.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/start_kernel.h>
+#include <linux/mm.h>
+
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+#include <asm/mach/map.h>
+#include <asm/fixmap.h>
+
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+
+static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
+
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). All the early functions are called too
+ * early to use lm_alias so __p*d_populate functions must be used to populate
+ * with the physical address from __pa_symbol.
+ */
+
+static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ pte_t *pte;
+ unsigned long next;
+ pgprot_t kernel_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+ L_PTE_SHARED | L_PTE_DIRTY |
+ L_PTE_MT_WRITEALLOC);
+
+ if (pmd_none(*pmd))
+ __pmd_populate(pmd, __pa_symbol(kasan_zero_pte),
+ _PAGE_KERNEL_TABLE);
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+ next = addr + PAGE_SIZE;
+ cpu_v7_set_pte_ext(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
+ kernel_pte),
+ 0);
+ } while (pte++, addr = next, addr != end && pte_none(*pte));
+}
+
+static void __init kasan_early_pmd_populate(pud_t *pud,
+ unsigned long addr,
+ unsigned long end)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ if (pud_none(*pud))
+ pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ kasan_early_pte_populate(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
+}
+
+static void __init kasan_early_pud_populate(pgd_t *pgd,
+ unsigned long addr,
+ unsigned long end)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ if (pgd_none(*pgd))
+ pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ kasan_early_pmd_populate(pud, addr, next);
+ } while (pud++, addr = next, addr != end && pud_none(*pud));
+}
+
+static void __init kasan_map_early_shadow(unsigned long start,
+ unsigned long end)
+{
+ unsigned long addr = start;
+ unsigned long next;
+ pgd_t *pgd;
+
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ kasan_early_pud_populate(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
+
+asmlinkage void __init kasan_early_init(void)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+ kasan_map_early_shadow(KASAN_SHADOW_START, KASAN_SHADOW_END);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+ return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+}
+
+static void __init clear_pmds(unsigned long start,
+ unsigned long end)
+{
+	/*
+	 * Remove references to the kasan page tables from
+	 * swapper_pg_dir.
+	 */
+ for (; start < end; start += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(start));
+}
+
+static void kasan_alloc_and_map_shadow(unsigned long start, unsigned long end)
+{
+ struct map_desc desc;
+ unsigned long size;
+ phys_addr_t l_shadow;
+
+ size = (end - start) >> KASAN_SHADOW_SCALE_SHIFT;
+ l_shadow = memblock_alloc(size, SECTION_SIZE);
+	WARN(!l_shadow, "%s: failed to reserve %ld bytes of shadow memory\n",
+	     __func__, size);
+
+ desc.virtual = (unsigned long)kasan_mem_to_shadow((void *)start);
+ desc.pfn = __phys_to_pfn(l_shadow);
+ desc.length = size;
+ desc.type = MT_MEMORY_RW;
+ create_mapping(&desc);
+ pr_info("KASAN shadow, virt:[%lx-%lx], phys:%x, size:%lx\n",
+ start, end, l_shadow, size);
+}
+
+void __init kasan_init(void)
+{
+ unsigned long start, end;
+ int i;
+
+	/*
+	 * We are going to perform proper setup of shadow memory.
+	 * First we should unmap the early shadow (see the clear_pmds()
+	 * call below). However, instrumented code can't execute without
+	 * shadow memory, so tmp_pg_dir is used to keep the early shadow
+	 * mapped until the full shadow setup is finished.
+	 */
+ memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
+ dsb(ishst);
+ cpu_switch_mm(tmp_pg_dir, &init_mm);
+ clear_pmds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ kasan_alloc_and_map_shadow(PAGE_OFFSET, KMEM_END);
+ kasan_alloc_and_map_shadow(FIXADDR_START, FIXADDR_END);
+#ifdef CONFIG_HIGHMEM
+ kasan_alloc_and_map_shadow(PKMAP_BASE,
+ PKMAP_BASE + LAST_PKMAP * PAGE_SIZE);
+#endif
+
+	/*
+	 * Populate the zero page for the vmalloc area and the other gaps.
+	 * TODO: does vmalloc need real KASAN coverage?
+	 */
+ start = (ulong)kasan_mem_to_shadow((void *)MODULES_VADDR);
+ kasan_map_early_shadow(KASAN_SHADOW_START, start);
+
+ start = (ulong)kasan_mem_to_shadow((void *)KMEM_END);
+ end = (ulong)kasan_mem_to_shadow((void *)FIXADDR_START);
+ kasan_map_early_shadow(start, end);
+
+ /*
+ * KAsan may reuse the contents of kasan_zero_pte directly, so we
+ * should make sure that it maps the zero page read-only.
+ */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte_ext(&kasan_zero_pte[i],
+ pfn_pte(sym_to_pfn(kasan_zero_page),
+ PAGE_KERNEL_RO), 0);
+
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+ cpu_switch_mm(swapper_pg_dir, &init_mm);
+ local_flush_tlb_all();
+ flush_cache_all();
+
+	/* clear all shadow memory before kasan starts running */
+ memset(kasan_mem_to_shadow((void *)PAGE_OFFSET), 0,
+ (KMEM_END - PAGE_OFFSET) >> KASAN_SHADOW_SCALE_SHIFT);
+ memset(kasan_mem_to_shadow((void *)FIXADDR_START), 0,
+ (FIXADDR_END - FIXADDR_START) >> KASAN_SHADOW_SCALE_SHIFT);
+#ifdef CONFIG_HIGHMEM
+ memset(kasan_mem_to_shadow((void *)PKMAP_BASE), 0,
+ (LAST_PKMAP * PAGE_SIZE) >> KASAN_SHADOW_SCALE_SHIFT);
+#endif
+
+ /* At this point kasan is fully initialized. Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized\n");
+}
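
For reference, each shadow byte encodes the state of one 8-byte granule: 0 means fully addressable (which is why memset()ing the shadow to zero above enables access), 1..7 means only the first N bytes are valid, and negative values are poison (redzones, freed memory). A sketch of the check for an access that stays inside one granule:

static bool granule_access_ok(unsigned long addr, size_t size)
{
	s8 shadow = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (shadow == 0)
		return true;		/* whole granule addressable */
	if (shadow < 0)
		return false;		/* redzone or freed memory */

	/* partial granule: access must end inside the valid prefix */
	return (addr & (KASAN_SHADOW_SCALE_SIZE - 1)) + size <= (size_t)shadow;
}
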
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4d3da37..c7e41d4 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -37,6 +37,9 @@
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include <asm/fixmap.h>
+#ifdef CONFIG_AMLOGIC_KASAN32
+#include <asm/kasan.h>
+#endif
#include "fault.h"
#include "mm.h"
@@ -951,7 +954,11 @@ static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
+#ifdef CONFIG_AMLOGIC_KASAN32
+void __init create_mapping(struct map_desc *md)
+#else
static void __init create_mapping(struct map_desc *md)
+#endif
{
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
@@ -1121,6 +1128,7 @@ void __init debug_ll_io_init(void)
}
#endif
+#ifndef CONFIG_AMLOGIC_KASAN32
static void * __initdata vmalloc_min =
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
@@ -1149,6 +1157,7 @@ static int __init early_vmalloc(char *arg)
return 0;
}
early_param("vmalloc", early_vmalloc);
+#endif
phys_addr_t arm_lowmem_limit __initdata = 0;
@@ -1166,7 +1175,11 @@ void __init adjust_lowmem_bounds(void)
* and may itself be outside the valid range for which phys_addr_t
* and therefore __pa() is defined.
*/
+#ifdef CONFIG_AMLOGIC_KASAN32
+ vmalloc_limit = (u64)(KMEM_END - PAGE_OFFSET + PHYS_OFFSET);
+#else
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+#endif
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
@@ -1244,8 +1257,19 @@ static inline void prepare_page_table(void)
/*
* Clear out all the mappings below the kernel image.
*/
+#ifdef CONFIG_AMLOGIC_KASAN32
+ for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE) {
+ /*
+		 * keep the pre-initialized KASAN shadow mappings alive
+		 * until KASAN is fully enabled
+ */
+ if (addr < KASAN_SHADOW_START || addr >= KASAN_SHADOW_END)
+ pmd_clear(pmd_off_k(addr));
+ }
+#else
for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
+#endif
#ifdef CONFIG_XIP_KERNEL
/* The XIP kernel is mapped in the module area -- skip over it */
@@ -1321,8 +1345,16 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
/*
* Clear page table except top pmd used by early fixmaps
*/
+#ifdef CONFIG_AMLOGIC_KASAN32
+ /* we have adjusted memory map layout */
+ for (addr = VMALLOC_START;
+ addr < (PAGE_OFFSET & PMD_MASK);
+ addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+#else
for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
+#endif
/*
* Map the kernel if it is XIP.
@@ -1645,10 +1677,11 @@ void __init paging_init(const struct machine_desc *mdesc)
unsigned long notrace phys_check(phys_addr_t x)
{
unsigned long addr;
- struct page *page;
addr = x - PHYS_OFFSET + PAGE_OFFSET;
+#ifndef CONFIG_AMLOGIC_KASAN32
if (scheduler_running) {
+ struct page *page;
page = phys_to_page(x);
/*
@@ -1662,16 +1695,19 @@ unsigned long notrace phys_check(phys_addr_t x)
dump_stack();
}
}
+#endif
return addr;
}
EXPORT_SYMBOL(phys_check);
unsigned long notrace virt_check(unsigned long x)
{
+#ifndef CONFIG_AMLOGIC_KASAN32
if (scheduler_running && (x >= VMALLOC_START || x < PAGE_OFFSET)) {
pr_err("bad input of virt:%lx\n", x);
dump_stack();
}
+#endif
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}
EXPORT_SYMBOL(virt_check);
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 59a8fa7..fa1c45b 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+KASAN_SANITIZE_vgettimeofday.o := n
+
CFLAGS_REMOVE_vdso.o = -pg
# Force -O2 to avoid libgcc dependencies
diff --git a/drivers/amlogic/memory_ext/Kconfig b/drivers/amlogic/memory_ext/Kconfig
index 88856a72..f40e0e3 100644
--- a/drivers/amlogic/memory_ext/Kconfig
+++ b/drivers/amlogic/memory_ext/Kconfig
@@ -40,10 +40,23 @@ config AMLOGIC_CMA
Amlogic CMA optimization for cma alloc/free problems
Including policy change of CMA usage
+config AMLOGIC_KASAN32
+ bool "Amlogic KASAN support for 32bit kernel"
+ depends on AMLOGIC_MEMORY_EXTEND
+ depends on KASAN
+ depends on !64BIT
+ default y
+ help
+	  Amlogic implementation of KASAN for the 32bit ARM kernel,
+	  which helps debug use-after-free, out-of-bounds accesses and
+	  other memory problems. Note that this config changes the
+	  memory layout.
+
config AMLOGIC_VMAP
bool "Amlogic kernel stack"
depends on AMLOGIC_MEMORY_EXTEND
depends on !KASAN
+ depends on !AMLOGIC_KASAN32
default y
help
This config is used to enable amlogic kernel stack
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 186f267..d25d998 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -161,6 +161,18 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
&prop);
#ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_KASAN32
+ {
+ unsigned long lowmem_size;
+
+	/* avoid a CMA region overlapping two memory zones */
+ lowmem_size = KMEM_END - PAGE_OFFSET;
+ lowmem_size += CONFIG_PHYS_OFFSET;
+ if (start < lowmem_size && end > lowmem_size) {
+ end = lowmem_size - SZ_4M;
+ }
+ }
+ #endif
#ifdef CONFIG_PHYS_ADDR_T_64BIT
pr_info("%s, start:%pa, end:%pa, len:%ld MiB\n",
__func__, &start, &end,
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 8622541..35408ec 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -416,8 +416,18 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
if (redzone_adjust > 0)
*size += redzone_adjust;
+#ifdef CONFIG_AMLOGIC_KASAN32 /* open-coded: min()/max() fails to build here on 32bit */
+ {
+ size_t s1;
+
+ s1 = max(*size, cache->object_size +
+ optimal_redzone(cache->object_size));
+ *size = s1 >= KMALLOC_MAX_SIZE ? KMALLOC_MAX_SIZE : s1;
+ }
+#else
*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
optimal_redzone(cache->object_size)));
+#endif
/*
* If the metadata doesn't fit, don't enable KASAN at all.
@@ -569,8 +579,13 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
+	#ifdef CONFIG_AMLOGIC_KASAN32 /* __builtin_return_address(1) breaks the build on 32bit ARM */
+ kasan_report_double_free(cache, object,
+ __builtin_return_address(0));
+ #else
kasan_report_double_free(cache, object,
__builtin_return_address(1));
+ #endif
return true;
}
@@ -731,6 +746,11 @@ static void register_global(struct kasan_global *global)
{
size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+#ifdef CONFIG_AMLOGIC_KASAN32
+	/* avoid closed-source modules panicking here */
+ if ((unsigned long)global->beg < MODULES_VADDR)
+ return;
+#endif
kasan_unpoison_shadow(global->beg, global->size);
kasan_poison_shadow(global->beg + aligned_size,