author | tao zeng <tao.zeng@amlogic.com> | 2018-02-07 02:18:07 (GMT) |
---|---|---|
committer | jie.yuan <jie.yuan@amlogic.com> | 2018-03-20 03:07:17 (GMT) |
commit | 7bb27d1645dc389f1c518796260d8f2d4ead3290 (patch) | |
tree | 179ad4085aa1761fb4f8b3d907bcdd686ea7cbb4 | |
parent | 379cb08c8c595eb8165085b6982dd0632010d59c (diff) | |
download | common-7bb27d1645dc389f1c518796260d8f2d4ead3290.zip common-7bb27d1645dc389f1c518796260d8f2d4ead3290.tar.gz common-7bb27d1645dc389f1c518796260d8f2d4ead3290.tar.bz2 |
memory: merge modification of tao.zeng [8/9]
mm: Improve page trace and mm debug function
PD#160479: mm: Improve page trace and mm debug function
1. Use page->flags to store trace value in 64bit Kernel;
And this feature can be enabled by default, without any
memory usage increase;
2. Sort and optimize common caller find function in pagetrace;
3. Add some debug print for secmon/of-reserved memory.
Change-Id: I70ce1629889934a5bf31b91df570afa537339479
Signed-off-by: tao zeng <tao.zeng@amlogic.com>
-rw-r--r-- | drivers/amlogic/memory_ext/page_trace.c | 262 | ||||
-rw-r--r-- | drivers/amlogic/secmon/secmon.c | 4 | ||||
-rw-r--r-- | drivers/of/of_reserved_mem.c | 9 | ||||
-rw-r--r-- | include/linux/amlogic/page_trace.h | 25 | ||||
-rw-r--r-- | include/linux/mm.h | 4 | ||||
-rw-r--r-- | include/linux/mm_types.h | 10 |
6 files changed, 253 insertions, 61 deletions
diff --git a/drivers/amlogic/memory_ext/page_trace.c b/drivers/amlogic/memory_ext/page_trace.c index c44e205..a2fa3df 100644 --- a/drivers/amlogic/memory_ext/page_trace.c +++ b/drivers/amlogic/memory_ext/page_trace.c @@ -28,11 +28,17 @@ #include <linux/slab.h> #include <linux/list.h> #include <linux/uaccess.h> +#include <linux/vmalloc.h> #include <asm/stacktrace.h> +#include <asm/sections.h> +#ifndef CONFIG_64BIT #define DEBUG_PAGE_TRACE 0 +#else +#define DEBUG_PAGE_TRACE 0 +#endif -#define COMMON_CALLER_SIZE 20 +#define COMMON_CALLER_SIZE 24 /* * this is a driver which will hook during page alloc/free and @@ -40,12 +46,15 @@ * of page allocate statistics can be find in /proc/pagetrace * */ -static unsigned int trace_step = 1; -static bool merge_function; +static bool merge_function = 1; +unsigned int cma_alloc_trace; +static struct proc_dir_entry *dentry; +#ifndef CONFIG_64BIT struct page_trace *trace_buffer; static unsigned long ptrace_size; -static struct proc_dir_entry *dentry; +static unsigned int trace_step = 1; static bool page_trace_disable __initdata = 1; +#endif struct alloc_caller { unsigned long func_start_addr; @@ -75,6 +84,10 @@ static struct fun_symbol common_func[] __initdata = { {"__kmalloc_track_caller", 1}, {"kmem_cache_alloc_trace", 1}, {"alloc_pages_exact", 1}, + {"get_zeroed_page", 1}, + {"__vmalloc_node_range", 1}, + {"vzalloc", 1}, + {"vmalloc", 1}, {"__alloc_page_frag", 1}, {"kmalloc_order", 0}, #ifdef CONFIG_SLUB /* for some static symbols not exported in headfile */ @@ -84,7 +97,13 @@ static struct fun_symbol common_func[] __initdata = { {} /* tail */ }; -static int early_page_trace_param(char *buf) +static inline bool page_trace_invalid(struct page_trace *trace) +{ + return trace->order == IP_INVALID; +} + +#ifndef CONFIG_64BIT +static int __init early_page_trace_param(char *buf) { if (!buf) return -EINVAL; @@ -111,11 +130,7 @@ static int early_page_trace_step(char *buf) return 0; } early_param("page_trace_step", early_page_trace_step); 
- -static inline bool page_trace_invalid(struct page_trace *trace) -{ - return trace->order == IP_INVALID; -} +#endif #if DEBUG_PAGE_TRACE static inline bool range_ok(struct page_trace *trace) @@ -152,13 +167,14 @@ static bool check_trace_valid(struct page_trace *trace) } return true; } -#else -static inline bool check_trace_valid(struct page_trace *trace) -{ - return true; -} #endif /* DEBUG_PAGE_TRACE */ +#ifdef CONFIG_64BIT +static void push_ip(struct page_trace *base, struct page_trace *ip) +{ + *base = *ip; +} +#else static void push_ip(struct page_trace *base, struct page_trace *ip) { int i; @@ -174,6 +190,7 @@ static void push_ip(struct page_trace *base, struct page_trace *ip) base[0] = *ip; } +#endif /* CONFIG_64BIT */ static inline int is_module_addr(unsigned long ip) { @@ -262,9 +279,16 @@ static unsigned long __init kallsyms_contain_name(const char *name, long full, if (full && strcmp(namebuf, name) == 0) return kallsyms_sym_address(i); - if (!full && strstr(namebuf, name) && (off > *offset)) { - *offset = off; /* update offset for next loop */ - return kallsyms_sym_address(i); + if (!full && strstr(namebuf, name)) { + /* not include tab */ + if (!strstr(namebuf, "__kstrtab") && + !strstr(namebuf, "__kcrctab") && + !strstr(namebuf, "__ksymtab") && + (off > *offset)) { + /* update offset for next loop */ + *offset = off; + return kallsyms_sym_address(i); + } } } return 0; @@ -323,7 +347,7 @@ static void __init dump_common_caller(void) for (i = 0; i < COMMON_CALLER_SIZE; i++) { if (common_caller[i].func_start_addr) - pr_debug("%2d, addr:%lx + %4lx, %pf\n", i, + printk(KERN_DEBUG"%2d, addr:%lx + %4lx, %pf\n", i, common_caller[i].func_start_addr, common_caller[i].size, (void *)common_caller[i].func_start_addr); @@ -332,12 +356,24 @@ static void __init dump_common_caller(void) } } +static int __init sym_cmp(const void *x1, const void *x2) +{ + struct alloc_caller *p1, *p2; + + p1 = (struct alloc_caller *)x1; + p2 = (struct alloc_caller *)x2; + + /* desending 
order */ + return p1->func_start_addr < p2->func_start_addr ? 1 : -1; +} + static void __init find_static_common_symbol(void) { int i; unsigned long addr; struct fun_symbol *s; + memset(common_caller, 0, sizeof(common_caller)); for (i = 0; i < COMMON_CALLER_SIZE; i++) { s = &common_func[i]; if (!s->name) @@ -353,23 +389,35 @@ static void __init find_static_common_symbol(void) pr_info("can't fuzzy match:%s\n", s->name); } } + sort(common_caller, COMMON_CALLER_SIZE, sizeof(struct alloc_caller), + sym_cmp, NULL); dump_common_caller(); } static int is_common_caller(struct alloc_caller *caller, unsigned long pc) { - int i, ret = 0; - - for (i = 0; i < COMMON_CALLER_SIZE; i++) { - if (!caller[i].func_start_addr) /* end if this table */ - break; + int ret = 0, cnt = 0; + int low = 0, high = COMMON_CALLER_SIZE - 1, mid; + unsigned long add_l, add_h; - /* pc is in one of common caller */ - if ((pc >= caller[i].func_start_addr) && - (pc <= (caller[i].func_start_addr + caller[i].size))) { + while (low < high) { + mid = (high + low) / 2; + add_l = caller[mid].func_start_addr; + add_h = caller[mid].func_start_addr + caller[mid].size; + if (pc >= add_l && pc < add_h) { ret = 1; break; } + if (pc < add_l) { /* caller is desending order */ + if (mid == (low + 1)) + break; + low = mid - 1; + } else { + if (mid == (high - 1)) + break; + high = mid + 1; + } + cnt++; } return ret; } @@ -378,6 +426,9 @@ unsigned long unpack_ip(struct page_trace *trace) { unsigned long text; + if (trace->order == IP_INVALID) + return 0; + if (trace->module_flag) text = MODULES_VADDR; else @@ -386,7 +437,7 @@ unsigned long unpack_ip(struct page_trace *trace) } EXPORT_SYMBOL(unpack_ip); -static inline unsigned long find_back_trace(void) +unsigned long find_back_trace(void) { struct stackframe frame; int ret, step = 0; @@ -418,12 +469,24 @@ static inline unsigned long find_back_trace(void) return 0; } +#ifdef CONFIG_64BIT +struct page_trace *find_page_base(struct page *page) +{ + struct page_trace *trace; 
+ + trace = (struct page_trace *)&page->trace; + return trace; +} +#else struct page_trace *find_page_base(struct page *page) { unsigned long pfn, zone_offset = 0, offset; struct zone *zone; struct page_trace *p; + if (!trace_buffer) + return NULL; + pfn = page_to_pfn(page); for_each_populated_zone(zone) { /* pfn is in this zone */ @@ -438,7 +501,20 @@ struct page_trace *find_page_base(struct page *page) } return NULL; } +#endif + +unsigned long get_page_trace(struct page *page) +{ + struct page_trace *trace; + trace = find_page_base(page); + if (trace) + return unpack_ip(trace); + + return 0; +} + +#ifndef CONFIG_64BIT static void __init set_init_page_trace(struct page *page, int order, gfp_t flag) { unsigned long text, ip; @@ -457,13 +533,49 @@ static void __init set_init_page_trace(struct page *page, int order, gfp_t flag) } } +#endif -void set_page_trace(struct page *page, int order, gfp_t gfp_flags) +unsigned int pack_ip(unsigned long ip, int order, gfp_t flag) { - unsigned long text, ip; - struct page_trace trace = {}, *base; + unsigned long text; + struct page_trace trace = {}; + + text = (unsigned long)_text; + if (ip >= (unsigned long)_text) + text = (unsigned long)_text; + else if (is_module_addr(ip)) { + text = MODULES_VADDR; + trace.module_flag = 1; + } + + trace.ret_ip = (ip - text) >> 2; + WARN_ON(trace.ret_ip > IP_RANGE_MASK); + if (flag == __GFP_BDEV) + trace.migrate_type = MIGRATE_CMA; + else + trace.migrate_type = gfpflags_to_migratetype(flag); + trace.order = order; +#if DEBUG_PAGE_TRACE + pr_debug("%s, base:%p, page:%lx, _ip:%x, o:%d, f:%x, ip:%lx\n", + __func__, base, page_to_pfn(page), + (*((unsigned int *)&trace)), order, + flag, ip); +#endif + return *((unsigned int *)&trace); +} +EXPORT_SYMBOL(pack_ip); + +void set_page_trace(struct page *page, int order, gfp_t flag) +{ + unsigned long ip; + struct page_trace *base; + unsigned int val; +#ifdef CONFIG_64BIT + if (page) { +#else if (page && trace_buffer) { +#endif ip = find_back_trace(); if 
(!ip) { pr_err("can't find backtrace for page:%lx\n", @@ -471,33 +583,31 @@ void set_page_trace(struct page *page, int order, gfp_t gfp_flags) dump_stack(); return; } - text = (unsigned long)_text; - if (ip >= (unsigned long)_text) - text = (unsigned long)_text; - else if (is_module_addr(ip)) { - text = MODULES_VADDR; - trace.module_flag = 1; - } - - trace.ret_ip = (ip - text) >> 2; - WARN_ON(trace.ret_ip > IP_RANGE_MASK); - if (gfp_flags == __GFP_BDEV) - trace.migrate_type = MIGRATE_CMA; - else - trace.migrate_type = gfpflags_to_migratetype(gfp_flags); - trace.order = order; + val = pack_ip(ip, order, flag); base = find_page_base(page); - #if DEBUG_PAGE_TRACE - pr_debug("%s, base:%p, page:%lx, _ip:%x, o:%d, f:%x, ip:%lx\n", - __func__, base, page_to_pfn(page), - (*((unsigned int *)&trace)), order, - gfp_flags, ip); - #endif - push_ip(base, &trace); + push_ip(base, (struct page_trace *)&val); } } EXPORT_SYMBOL(set_page_trace); +#ifdef CONFIG_64BIT +void reset_page_trace(struct page *page, int order) +{ + struct page_trace *base; + struct page *p; + int i, cnt; + + if (page) { + cnt = 1 << order; + p = page; + for (i = 0; i < cnt; i++) { + base = find_page_base(p); + base->order = IP_INVALID; + p++; + } + } +} +#else void reset_page_trace(struct page *page, int order) { struct page_trace *base; @@ -520,8 +630,10 @@ void reset_page_trace(struct page *page, int order) } } } +#endif EXPORT_SYMBOL(reset_page_trace); +#ifndef CONFIG_64BIT /* * move page out of buddy and make sure they are not malloced by * other module @@ -570,6 +682,7 @@ static int __init page_trace_pre_work(unsigned long size) } return 0; } +#endif #define SHOW_CNT 1024 struct page_summary { @@ -656,7 +769,7 @@ static inline int type_match(struct page_trace *trace, int type) static int update_page_trace(struct seq_file *m, struct zone *zone, struct page_summary *sum, int type) { - unsigned long pfn, flags; + unsigned long pfn; unsigned long start_pfn = zone->zone_start_pfn; unsigned long end_pfn = 
zone_end_pfn(zone); int max_trace = 0, ret; @@ -664,7 +777,6 @@ static int update_page_trace(struct seq_file *m, struct zone *zone, unsigned long ip; struct page_trace *trace; - spin_lock_irqsave(&zone->lock, flags); for (pfn = start_pfn; pfn < end_pfn; pfn++) { struct page *page; @@ -678,7 +790,9 @@ static int update_page_trace(struct seq_file *m, struct zone *zone, continue; trace = find_page_base(page); + #if DEBUG_PAGE_TRACE check_trace_valid(trace); + #endif if (page_trace_invalid(trace)) /* free pages */ continue; @@ -697,7 +811,6 @@ static int update_page_trace(struct seq_file *m, struct zone *zone, pfn += ((1 << order) - 1); } } - spin_unlock_irqrestore(&zone->lock, flags); return max_trace; } /* @@ -711,6 +824,13 @@ static int pagetrace_show(struct seq_file *m, void *arg) int mtype, ret, print_flag; struct page_summary *sum; +#ifndef CONFIG_64BIT + if (!trace_buffer) { + seq_puts(m, "page trace not enabled\n"); + return 0; + } +#endif + /* check memoryless node */ if (!node_state(p->node_id, N_MEMORY)) return 0; @@ -801,6 +921,15 @@ static ssize_t pagetrace_write(struct file *file, const char __user *buffer, pr_info("set merge_function to %d\n", merge_function); } + if (!strncmp(buf, "cma_trace=", 10)) { /* option for 'cma_trace=' */ + if (sscanf(buf, "cma_trace=%ld", &arg) < 0) { + kfree(buf); + return -EINVAL; + } + cma_alloc_trace = arg ? 
1 : 0; + pr_info("set cma_trace to %d\n", cma_alloc_trace); + } + kfree(buf); return count; @@ -816,8 +945,6 @@ static const struct file_operations pagetrace_file_ops = { static int __init page_trace_module_init(void) { - if (!trace_buffer) - return -ENOMEM; dentry = proc_create("pagetrace", 0444, NULL, &pagetrace_file_ops); if (IS_ERR_OR_NULL(dentry)) { @@ -825,6 +952,11 @@ static int __init page_trace_module_init(void) return -1; } +#ifndef CONFIG_64BIT + if (!trace_buffer) + return -ENOMEM; +#endif + return 0; } @@ -838,9 +970,21 @@ module_exit(page_trace_module_exit); void __init page_trace_mem_init(void) { +#ifndef CONFIG_64BIT struct zone *zone; unsigned long total_page = 0; +#endif + find_static_common_symbol(); +#ifdef CONFIG_64BIT + /* + * if this compiler error occurs, that means there are over 32 page + * flags, you should disable AMLOGIC_PAGE_TRACE or reduce some page + * flags. + */ + BUILD_BUG_ON((__NR_PAGEFLAGS + ZONES_WIDTH) > 32); + BUILD_BUG_ON(NODES_WIDTH > 0); +#else if (page_trace_disable) return; @@ -857,6 +1001,6 @@ void __init page_trace_mem_init(void) pr_err("%s reserve memory failed\n", __func__); return; } - find_static_common_symbol(); +#endif } diff --git a/drivers/amlogic/secmon/secmon.c b/drivers/amlogic/secmon/secmon.c index 8408510..44e117c 100644 --- a/drivers/amlogic/secmon/secmon.c +++ b/drivers/amlogic/secmon/secmon.c @@ -93,7 +93,7 @@ static int secmon_probe(struct platform_device *pdev) pr_err("can't get reserve_mem_size, use default value\n"); mem_size = RESERVE_MEM_SIZE; } else - pr_debug("reserve_mem_size:0x%x\n", mem_size); + pr_info("reserve_mem_size:0x%x\n", mem_size); ret = of_reserved_mem_device_init(&pdev->dev); if (ret) { @@ -106,7 +106,7 @@ static int secmon_probe(struct platform_device *pdev) pr_err("alloc page failed, ret:%p\n", page); return -ENOMEM; } - pr_debug("get page:%p, %lx\n", page, page_to_pfn(page)); + pr_info("get page:%p, %lx\n", page, page_to_pfn(page)); sharemem_in_base = ioremap_cache(phy_in_base, 
IN_SIZE); if (!sharemem_in_base) { diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 366d8c3..d74821b 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -155,6 +155,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, end = start + dt_mem_next_cell(dt_root_size_cells, &prop); + #ifdef CONFIG_AMLOGIC_MODIFY + pr_info("%s, start:%llx, end:%llx, len:%llx\n", + __func__, start, end, end - start); + #endif /* CONFIG_AMLOGIC_MODIFY */ ret = early_init_dt_alloc_reserved_memory_arch(size, align, start, end, nomap, &base); if (ret == 0) { @@ -175,7 +179,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, } if (base == 0) { + #ifdef CONFIG_AMLOGIC_MODIFY + pr_info("failed to allocate memory for node %s, size:%ld MB\n", + uname, (unsigned long)size / SZ_1M); + #else pr_info("failed to allocate memory for node '%s'\n", uname); + #endif /* CONFIG_AMLOGIC_MODIFY */ return -ENOMEM; } diff --git a/include/linux/amlogic/page_trace.h b/include/linux/amlogic/page_trace.h index d3b0aea..83c6fde 100644 --- a/include/linux/amlogic/page_trace.h +++ b/include/linux/amlogic/page_trace.h @@ -19,6 +19,9 @@ #define __PAGE_TRACE_H__ #include <asm/memory.h> +#include <asm/stacktrace.h> +#include <asm/sections.h> +#include <linux/page-flags.h> /* * bit map lay out for _ret_ip table @@ -49,6 +52,8 @@ /* max order usually should not be 15 */ #define IP_INVALID (0xf) +struct page; + /* this struct should not larger than 32 bit */ struct page_trace { unsigned int ret_ip :24; @@ -58,11 +63,16 @@ struct page_trace { }; #ifdef CONFIG_AMLOGIC_PAGE_TRACE +extern unsigned int cma_alloc_trace; extern unsigned long unpack_ip(struct page_trace *trace); +extern unsigned int pack_ip(unsigned long ip, int order, gfp_t flag); extern void set_page_trace(struct page *page, int order, gfp_t gfp_flags); extern void reset_page_trace(struct page *page, int order); extern void page_trace_mem_init(void); extern struct page_trace 
*find_page_base(struct page *page); +extern unsigned long find_back_trace(void); +extern unsigned long get_page_trace(struct page *page); +extern void show_data(unsigned long addr, int nbytes, const char *name); #else static inline unsigned long unpack_ip(struct page_trace *trace) { @@ -81,8 +91,23 @@ static inline struct page_trace *find_page_base(struct page *page) { return NULL; } +static unsigned long find_back_trace(void) +{ + return 0; +} #endif +#ifdef CONFIG_AMLOGIC_SLUB_DEBUG +#include <linux/slub_def.h> +extern int aml_slub_check_object(struct kmem_cache *s, void *p, void *q); +extern void aml_get_slub_trace(struct kmem_cache *s, struct page *page, + gfp_t flags, int order); +extern void aml_put_slub_trace(struct page *page, struct kmem_cache *s); +extern int aml_check_kmemcache(struct kmem_cache_cpu *c, struct kmem_cache *s, + void *object); +extern void aml_slub_set_trace(struct kmem_cache *s, void *object); +#endif /* CONFIG_AMLOGIC_SLUB_DEBUG */ + #ifdef CONFIG_KALLSYMS extern const unsigned long kallsyms_addresses[] __weak; extern const int kallsyms_offsets[] __weak; diff --git a/include/linux/mm.h b/include/linux/mm.h index a4b2d36..1584a00 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -697,7 +697,11 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... 
| FLAGS | */ #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) +#if defined(CONFIG_AMLOGIC_PAGE_TRACE) && defined(CONFIG_64BIT) +#define ZONES_PGOFF ((sizeof(unsigned int) * 8) - ZONES_WIDTH) +#else #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) +#endif /* CONFIG_AMLOGIC_PAGE_TRACE */ #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) /* diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5942478..3166105 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -44,8 +44,18 @@ struct mem_cgroup; */ struct page { /* First double word block */ +#if defined(CONFIG_AMLOGIC_PAGE_TRACE) && defined(CONFIG_64BIT) + union { + unsigned long flags; + struct { + unsigned int s_flags; + unsigned int trace; + }; + }; +#else unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ +#endif /* CONFIG_AMLOGIC_PAGE_TRACE */ union { struct address_space *mapping; /* If low bit clear, points to * inode address_space, or NULL. |