author Tao Zeng <tao.zeng@amlogic.com> 2019-09-17 07:00:12 (GMT)
committer Jianxin Pan <jianxin.pan@amlogic.com>2019-09-20 02:46:28 (GMT)
commit 89fd94cefb3134775a8b3dcf5d9d5bac77098123 (patch)
tree 0d8dbf73ddf4f76c760edd7ec4eb1ff6acc1b1d8
parent bd353a10b37e494ae7e2275134db8ded1a4b646a (diff)
vmap: fix wrong mmu setting in check sp function [1/1]
PD#TV-9668

Problem:
If the sp address is in the linear mapping range, the check_sp_fault_again
function in the vmap fault handler will still map a new page for it. This
pollutes data in the R/W section.

Solution:
Do not map a page if sp is in the linear mapping range.

Verify:
TL1 x301

Change-Id: I0e02a2048b586854c528cd3eeafb725751b9dc82
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
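The core of the fix is an early bail-out in check_sp_fault_again() before any page is mapped. Below is a minimal sketch of that guard, not the driver code itself: VMAP_STACK_START/VMAP_STACK_END and the sketch helpers are assumed placeholders standing in for the driver's real is_vmap_addr() check and page pool.

```c
/*
 * Sketch of the guard added by this patch, not the driver code itself.
 * VMAP_STACK_START/VMAP_STACK_END and vmap_map_next_page() are assumed
 * placeholders; the real driver uses is_vmap_addr() and its page pool.
 */
#include <stdbool.h>

#define VMAP_STACK_START 0xffffff8000000000UL	/* assumed area start */
#define VMAP_STACK_END   0xffffff8010000000UL	/* assumed area end   */

static bool is_vmap_addr_sketch(unsigned long addr)
{
	return addr >= VMAP_STACK_START && addr < VMAP_STACK_END;
}

static void check_sp_fault_again_sketch(unsigned long sp, unsigned long frame_size)
{
	/* lowest address touched when the context is copied back */
	unsigned long addr = sp - frame_size;

	/*
	 * If sp already points into the linear mapping, mapping a fresh
	 * page here would overwrite live R/W data, so do nothing.
	 */
	if (!is_vmap_addr_sketch(addr))
		return;

	/* otherwise pre-map the next stack page before the fault can recur */
	/* vmap_map_next_page(addr); */
}
```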
Diffstat
-rw-r--r--  drivers/amlogic/memory_ext/vmap_stack.c | 30
-rw-r--r--  fs/proc/meminfo.c                       |  6
2 files changed, 30 insertions(+), 6 deletions(-)
diff --git a/drivers/amlogic/memory_ext/vmap_stack.c b/drivers/amlogic/memory_ext/vmap_stack.c
index 4d567ae..2d25772 100644
--- a/drivers/amlogic/memory_ext/vmap_stack.c
+++ b/drivers/amlogic/memory_ext/vmap_stack.c
@@ -52,6 +52,8 @@
static unsigned long stack_shrink_jiffies;
static unsigned char vmap_shrink_enable;
static atomic_t vmap_stack_size;
+static atomic_t vmap_fault_count;
+static atomic_t vmap_pre_handle_count;
static struct aml_vmap *avmap;
#ifdef CONFIG_ARM64
@@ -454,6 +456,18 @@ static void check_sp_fault_again(struct pt_regs *regs)
#endif
addr = sp - sizeof(*regs);
+ /*
+ * When we handle a vmap stack fault, we are running on the
+ * pre-allocated per-cpu vmap stack. But if sp is near the bottom
+ * of a page and we return to the normal handler, sp may grow down
+ * into another page and cause a vmap fault again. So we need to
+ * map the next page for the stack before that page fault happens.
+ *
+ * But we must check that sp is really in the vmap stack range.
+ */
+ if (!is_vmap_addr(addr)) /* addr may be in linear mapping */
+ return;
+
if (sp && ((addr & PAGE_MASK) != (sp & PAGE_MASK))) {
/*
* will fault when we copy back context, so handle
@@ -477,6 +491,7 @@ static void check_sp_fault_again(struct pt_regs *regs)
mod_delayed_work(system_highpri_wq, &avmap->mwork, 0);
D("map page:%5lx for addr:%lx\n", page_to_pfn(page), addr);
+ atomic_inc(&vmap_pre_handle_count);
#if DEBUG
show_fault_stack(addr, regs);
#endif
@@ -557,6 +572,7 @@ int handle_vmap_fault(unsigned long addr, unsigned int esr,
if (cache <= (VMAP_CACHE_PAGE / 2))
mod_delayed_work(system_highpri_wq, &avmap->mwork, 0);
+ atomic_inc(&vmap_fault_count);
D("map page:%5lx for addr:%lx\n", page_to_pfn(page), addr);
#if DEBUG
show_fault_stack(addr, regs);
@@ -586,6 +602,20 @@ static int shrink_vm_stack(unsigned long low, unsigned long high)
return pages;
}
+void arch_report_meminfo(struct seq_file *m)
+{
+ unsigned long kb = 1 << (PAGE_SHIFT - 10);
+ unsigned long tmp1, tmp2, tmp3;
+
+ tmp1 = kb * atomic_read(&vmap_stack_size);
+ tmp2 = kb * atomic_read(&vmap_fault_count);
+ tmp3 = kb * atomic_read(&vmap_pre_handle_count);
+
+ seq_printf(m, "VmapStack: %8ld kB\n", tmp1);
+ seq_printf(m, "VmapFault: %8ld kB\n", tmp2);
+ seq_printf(m, "VmapPfault: %8ld kB\n", tmp3);
+}
+
static unsigned long get_task_stack_floor(unsigned long sp)
{
unsigned long end;
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 58bd174..8a42849 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -15,9 +15,6 @@
#ifdef CONFIG_CMA
#include <linux/cma.h>
#endif
-#ifdef CONFIG_AMLOGIC_VMAP
-#include <linux/amlogic/vmap_stack.h>
-#endif
#include <asm/page.h>
#include <asm/pgtable.h>
#include "internal.h"
@@ -156,9 +153,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "CmaFree: ",
global_page_state(NR_FREE_CMA_PAGES));
#endif
-#ifdef CONFIG_AMLOGIC_VMAP
- show_val_kb(m, "VmapStack: ", get_vmap_stack_size());
-#endif
hugetlb_report_meminfo(m);
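The fs/proc/meminfo.c hunk can drop the CONFIG_AMLOGIC_VMAP lines because meminfo_proc_show() already calls the generic arch_report_meminfo() hook (defined as an empty weak function in fs/proc/meminfo.c), and the new arch_report_meminfo() in vmap_stack.c overrides it to print the vmap stack size and the two new counters. As a rough illustration only, a userspace reader could pick the new fields out of /proc/meminfo like this; the reader program itself is an assumed sketch, only the field names come from the patch.

```c
/*
 * Illustrative userspace check for the fields exported via
 * arch_report_meminfo(): VmapStack, VmapFault, VmapPfault.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		/* matches VmapStack:/VmapFault:/VmapPfault: lines */
		if (!strncmp(line, "Vmap", 4))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```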