author    tao zeng <tao.zeng@amlogic.com>    2018-02-24 07:40:59 (GMT)
committer jie.yuan <jie.yuan@amlogic.com>    2018-03-20 03:07:36 (GMT)
commit    4217bd8bb30e3c0d9c7391e831ba78bb96a92a6b (patch)
tree      12ed65de4b59affc600cae4b24261f7fea8c69e9
parent    7bb27d1645dc389f1c518796260d8f2d4ead3290 (diff)
memory: merge modification of tao.zeng [9/9]
mm: optimize CMA allocation time

PD#159608: mm: optimize CMA allocation time

1. Make all Amlogic-changed mm code configurable, wrapped by
   CONFIG_AMLOGIC_CMA/CONFIG_AMLOGIC_MEMORY_EXTEND.
2. Move the core CMA changes into a single file:
   drivers/amlogic/memory_ext/aml_cma.c
3. Detailed improvement steps:
   a) Use NOOP as the default I/O scheduler for NAND-based storage,
      which avoids the long waits on page locks seen with the CFQ
      scheduler.
   b) Use per-cpu threads to allocate CMA concurrently when a driver
      requests a large amount of CMA memory; these threads run at a
      raised priority (negative nice value) to reduce scheduling delay.
   c) Raise the priority of the mmc queue thread and kswapd.
   d) Wake up kswapd if pages are held on its shrink list and the CMA
      isolation test fails.
   e) Forbid low-priority (positive nice) tasks from using CMA pages,
      which avoids priority-inversion problems.
   f) Optimize LRU usage: divide each LRU list into two parts; normal
      pages are linked after the LRU head, CMA pages after cma_list.
   g) Prevent compaction from moving CMA-forbidden pages into the CMA
      area.
   h) Increase the aggressiveness of the low-memory killer.
4. Improve the read speed of /proc/pagetrace. A filter can be set so
   that functions which allocated less than the filter value (in KB)
   are not printed:
   echo filter=xxx > /proc/pagetrace

Change-Id: Ie79288b7947aa642e4f7eacc25565559a73660df
Signed-off-by: tao zeng <tao.zeng@amlogic.com>
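To make step 3b concrete, below is a minimal, standalone C sketch of the range-splitting arithmetic that cma_alloc_contig_boost() in the diff uses to hand work to the per-cpu threads: every worker gets count/cpus pages and the last worker takes the remainder. The function name, values and output here are illustrative only, not part of the patch.

#include <stdio.h>

/* Split `count` pages starting at `start_pfn` across `cpus` workers the
 * same way the boost path does: delta = count / cpus for every worker,
 * with the last worker taking count - i * delta so that integer
 * division loses nothing. */
static void split_range(unsigned long start_pfn, unsigned long count, int cpus)
{
        unsigned long delta = count / cpus;
        int i;

        for (i = 0; i < cpus; i++) {
                unsigned long pfn = start_pfn + i * delta;
                unsigned long n = (i == cpus - 1) ? count - i * delta : delta;

                printf("worker %d: pfn 0x%lx, %lu pages\n", i, pfn, n);
        }
}

int main(void)
{
        /* e.g. 1000 pages over 3 CPUs splits as 333/333/334 */
        split_range(0x80000, 1000, 3);
        return 0;
}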
Diffstat
-rw-r--r--  MAINTAINERS                                  |   4
-rw-r--r--  drivers/amlogic/memory_ext/Kconfig           |  18
-rw-r--r--  drivers/amlogic/memory_ext/Makefile          |   2
-rw-r--r--  drivers/amlogic/memory_ext/aml_cma.c         | 778
-rw-r--r--  drivers/amlogic/memory_ext/aml_slub_debug.c  | 208
-rw-r--r--  drivers/amlogic/memory_ext/page_trace.c      | 266
-rw-r--r--  drivers/mmc/card/queue.c                     |   8
-rw-r--r--  drivers/staging/android/lowmemorykiller.c    |  75
-rw-r--r--  fs/block_dev.c                               |   4
-rw-r--r--  include/linux/amlogic/aml_cma.h              |  93
-rw-r--r--  include/linux/amlogic/page_trace.h           |   2
-rw-r--r--  include/linux/cma.h                          |   5
-rw-r--r--  include/linux/gfp.h                          |  12
-rw-r--r--  include/linux/mm_inline.h                    |  39
-rw-r--r--  include/linux/mmzone.h                       |  27
-rw-r--r--  include/linux/vmstat.h                       |   8
-rw-r--r--  mm/cma.c                                     |  84
-rw-r--r--  mm/compaction.c                              |  33
-rw-r--r--  mm/internal.h                                |  10
-rw-r--r--  mm/mmzone.c                                  |   4
-rw-r--r--  mm/page_alloc.c                              | 420
-rw-r--r--  mm/page_isolation.c                          |  41
-rw-r--r--  mm/readahead.c                               |   5
-rw-r--r--  mm/slab_common.c                             |  10
-rw-r--r--  mm/swap.c                                    |  22
-rw-r--r--  mm/vmscan.c                                  |  93
-rw-r--r--  mm/vmstat.c                                  |  20
27 files changed, 1686 insertions, 605 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 2ff2467..ddef510 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13484,6 +13484,10 @@ AMLOGIC driver for memory extend
M: Tao Zeng <tao.zeng@amlogic.com>
F: drivers/amlogic/memory_ext/*
+AMLOGIC driver for memory extend
+M: Tao Zeng <tao.zeng@amlogic.com>
+F: include/linux/amlogic/aml_cma.h
+
AMLOGIC driver for pmu
M: Tao Zeng <tao.zeng@amlogic.com>
F: drivers/amlogic/power/*
diff --git a/drivers/amlogic/memory_ext/Kconfig b/drivers/amlogic/memory_ext/Kconfig
index 4ea17c1..8a1433a 100644
--- a/drivers/amlogic/memory_ext/Kconfig
+++ b/drivers/amlogic/memory_ext/Kconfig
@@ -18,3 +18,21 @@ config AMLOGIC_PAGE_TRACE
information is stored in a pre-allocated memory block. And can be shown
with allocate page count information of each caller functions from
/proc/pagetrace
+
+config AMLOGIC_CMA
+ bool "Amlogic CMA change"
+ depends on AMLOGIC_MEMORY_EXTEND
+ depends on CMA
+ default y
+ help
+ Amlogic CMA optimizations for CMA alloc/free problems,
+ including policy changes for CMA usage.
+
+config AMLOGIC_SLUB_DEBUG
+ bool "Amlogic debug for trace all slub objects"
+ depends on AMLOGIC_PAGE_TRACE
+ depends on SLUB
+ default n
+ help
+ This option enables trace debugging for each slub object, and will
+ report slub object allocator information when a bad object is detected.
diff --git a/drivers/amlogic/memory_ext/Makefile b/drivers/amlogic/memory_ext/Makefile
index 673d8b3..93b0037 100644
--- a/drivers/amlogic/memory_ext/Makefile
+++ b/drivers/amlogic/memory_ext/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_AMLOGIC_PAGE_TRACE) += page_trace.o
+obj-$(CONFIG_AMLOGIC_CMA) += aml_cma.o
+obj-$(CONFIG_AMLOGIC_SLUB_DEBUG) += aml_slub_debug.o
diff --git a/drivers/amlogic/memory_ext/aml_cma.c b/drivers/amlogic/memory_ext/aml_cma.c
new file mode 100644
index 0000000..dceb78a
--- /dev/null
+++ b/drivers/amlogic/memory_ext/aml_cma.c
@@ -0,0 +1,778 @@
+/*
+ * drivers/amlogic/memory_ext/aml_cma.c
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/rt.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+#include <linux/migrate.h>
+#include <linux/cpu.h>
+#include <linux/page-isolation.h>
+#include <linux/spinlock_types.h>
+#include <linux/amlogic/aml_cma.h>
+#include <linux/hugetlb.h>
+#include <trace/events/page_isolation.h>
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+#include <linux/amlogic/page_trace.h>
+#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
+
+struct work_cma {
+ struct list_head list;
+ unsigned long pfn;
+ unsigned long count;
+ int ret;
+};
+
+struct cma_pcp {
+ struct list_head list;
+ struct completion start;
+ struct completion end;
+ spinlock_t list_lock;
+ int cpu;
+};
+
+static bool can_boost;
+static DEFINE_PER_CPU(struct cma_pcp, cma_pcp_thread);
+
+DEFINE_SPINLOCK(cma_iso_lock);
+static atomic_t cma_allocate;
+
+int cma_alloc_ref(void)
+{
+ return atomic_read(&cma_allocate);
+}
+EXPORT_SYMBOL(cma_alloc_ref);
+
+void get_cma_alloc_ref(void)
+{
+ atomic_inc(&cma_allocate);
+}
+EXPORT_SYMBOL(get_cma_alloc_ref);
+
+void put_cma_alloc_ref(void)
+{
+ atomic_dec(&cma_allocate);
+}
+EXPORT_SYMBOL(put_cma_alloc_ref);
+
+static __read_mostly unsigned long total_cma_pages;
+static atomic_long_t nr_cma_allocated;
+unsigned long get_cma_allocated(void)
+{
+ return atomic_long_read(&nr_cma_allocated);
+}
+EXPORT_SYMBOL(get_cma_allocated);
+
+unsigned long get_total_cmapages(void)
+{
+ return total_cma_pages;
+}
+EXPORT_SYMBOL(get_total_cmapages);
+
+void cma_page_count_update(long diff)
+{
+ total_cma_pages += diff / PAGE_SIZE;
+}
+EXPORT_SYMBOL(cma_page_count_update);
+
+#define RESTRIC_ANON 0
+#define ANON_RATIO 60
+
+bool can_use_cma(gfp_t gfp_flags)
+{
+#if RESTRIC_ANON
+ unsigned long anon_cma;
+#endif /* RESTRIC_ANON */
+
+ if (cma_forbidden_mask(gfp_flags))
+ return false;
+
+ /*
+ * do not use cma pages when cma allocate is working. this is the
+ * weakest condition
+ */
+ if (cma_alloc_ref())
+ return false;
+
+ if (task_nice(current) > 0)
+ return false;
+
+#if RESTRIC_ANON
+ /*
+ * calculate if there are enough space for anon_cma
+ */
+ if (!(gfp_flags & __GFP_COLD)) {
+ anon_cma = global_page_state(NR_INACTIVE_ANON_CMA) +
+ global_page_state(NR_ACTIVE_ANON_CMA);
+ if (anon_cma * 100 > total_cma_pages * ANON_RATIO)
+ return false;
+ }
+#endif /* RESTRIC_ANON */
+
+ return true;
+}
+EXPORT_SYMBOL(can_use_cma);
+
+bool cma_page(struct page *page)
+{
+ int migrate_type = 0;
+
+ if (!page)
+ return false;
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_cma(migrate_type) ||
+ is_migrate_isolate(migrate_type)) {
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL(cma_page);
+
+
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+static void update_cma_page_trace(struct page *page, unsigned long cnt)
+{
+ long i;
+
+ if (page == NULL)
+ return;
+
+ if (cma_alloc_trace)
+ pr_info("%s alloc page:%lx, count:%ld, func:%pf\n", __func__,
+ page_to_pfn(page), cnt, (void *)find_back_trace());
+ for (i = 0; i < cnt; i++) {
+ set_page_trace(page, 0, __GFP_BDEV);
+ page++;
+ }
+}
+#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
+
+void aml_cma_alloc_pre_hook(int *dummy, int count)
+{
+ get_cma_alloc_ref();
+
+ /* temporarily increase task priority if allocating many pages */
+ *dummy = task_nice(current);
+ if (count >= (pageblock_nr_pages / 2))
+ set_user_nice(current, -18);
+}
+EXPORT_SYMBOL(aml_cma_alloc_pre_hook);
+
+void aml_cma_alloc_post_hook(int *dummy, int count, struct page *page)
+{
+ put_cma_alloc_ref();
+ if (page)
+ atomic_long_add(count, &nr_cma_allocated);
+ if (count >= (pageblock_nr_pages / 2))
+ set_user_nice(current, *dummy);
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+ update_cma_page_trace(page, count);
+#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
+}
+EXPORT_SYMBOL(aml_cma_alloc_post_hook);
+
+void aml_cma_release_hook(int count, struct page *pages)
+{
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+ if (cma_alloc_trace)
+ pr_info("%s free page:%lx, count:%d, func:%pf\n", __func__,
+ page_to_pfn(pages), count, (void *)find_back_trace());
+#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
+ atomic_long_sub(count, &nr_cma_allocated);
+}
+EXPORT_SYMBOL(aml_cma_release_hook);
+
+static unsigned long get_align_pfn_low(unsigned long pfn)
+{
+ return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages) - 1);
+}
+
+static unsigned long get_align_pfn_high(unsigned long pfn)
+{
+ return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages));
+}
+
+static struct page *get_migrate_page(struct page *page, unsigned long private,
+ int **resultp)
+{
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_BDEV;
+
+ /*
+ * TODO: allocate a destination hugepage from a nearest neighbor node,
+ * accordance with memory policy of the user process if possible. For
+ * now as a simple work-around, we use the next node for destination.
+ */
+ if (PageHuge(page))
+ return alloc_huge_page_node(page_hstate(compound_head(page)),
+ next_node_in(page_to_nid(page),
+ node_online_map));
+
+ if (PageHighMem(page))
+ gfp_mask |= __GFP_HIGHMEM;
+
+ return alloc_page(gfp_mask);
+}
+
+/* [start, end) must belong to a single zone. */
+static int aml_alloc_contig_migrate_range(struct compact_control *cc,
+ unsigned long start,
+ unsigned long end, bool boost)
+{
+ /* This function is based on compact_zone() from compaction.c. */
+ unsigned long nr_reclaimed;
+ unsigned long pfn = start;
+ unsigned int tries = 0;
+ int ret = 0;
+
+ if (!boost)
+ migrate_prep();
+
+ while (pfn < end || !list_empty(&cc->migratepages)) {
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (list_empty(&cc->migratepages)) {
+ cc->nr_migratepages = 0;
+ pfn = isolate_migratepages_range(cc, pfn, end);
+ if (!pfn) {
+ ret = -EINTR;
+ break;
+ }
+ tries = 0;
+ } else if (++tries == 5) {
+ ret = ret < 0 ? ret : -EBUSY;
+ break;
+ }
+
+ nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
+ &cc->migratepages);
+ cc->nr_migratepages -= nr_reclaimed;
+
+ ret = migrate_pages(&cc->migratepages, get_migrate_page,
+ NULL, 0, cc->mode, MR_CMA);
+ }
+ if (ret < 0) {
+ putback_movable_pages(&cc->migratepages);
+ return ret;
+ }
+ return 0;
+}
+
+
+static int cma_boost_work_func(void *cma_data)
+{
+ struct cma_pcp *c_work;
+ struct work_cma *job;
+ unsigned long pfn, end;
+ int ret = -1;
+ int this_cpu;
+ struct compact_control cc = {
+ .nr_migratepages = 0,
+ .order = -1,
+ .mode = MIGRATE_SYNC,
+ .page_type = COMPACT_CMA,
+ .ignore_skip_hint = true,
+ };
+
+ c_work = (struct cma_pcp *)cma_data;
+ for (;;) {
+ ret = wait_for_completion_interruptible(&c_work->start);
+ if (ret < 0) {
+ pr_err("%s wait for task %d is %d\n",
+ __func__, c_work->cpu, ret);
+ continue;
+ }
+ this_cpu = get_cpu();
+ put_cpu();
+ if (this_cpu != c_work->cpu) {
+ pr_err("%s, cpu %d is not work cpu:%d\n",
+ __func__, this_cpu, c_work->cpu);
+ }
+ spin_lock(&c_work->list_lock);
+ if (list_empty(&c_work->list)) {
+ /* NO job todo ? */
+ pr_err("%s,%d, list empty\n", __func__, __LINE__);
+ spin_unlock(&c_work->list_lock);
+ goto next;
+ }
+ job = list_first_entry(&c_work->list, struct work_cma, list);
+ list_del(&job->list);
+ spin_unlock(&c_work->list_lock);
+
+ INIT_LIST_HEAD(&cc.migratepages);
+ lru_add_drain();
+ pfn = job->pfn;
+ cc.zone = page_zone(pfn_to_page(pfn));
+ end = pfn + job->count;
+ ret = aml_alloc_contig_migrate_range(&cc, pfn, end, 1);
+ job->ret = ret;
+ if (!ret) {
+ lru_add_drain();
+ drain_local_pages(NULL);
+ }
+ if (ret)
+ pr_debug("%s, failed, ret:%d\n", __func__, ret);
+next:
+ complete(&c_work->end);
+ if (kthread_should_stop()) {
+ pr_err("%s task exit\n", __func__);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int __init init_cma_boost_task(void)
+{
+ int cpu;
+ struct task_struct *task;
+ struct cma_pcp *work;
+ char task_name[20] = {};
+
+ for_each_possible_cpu(cpu) {
+ memset(task_name, 0, sizeof(task_name));
+ sprintf(task_name, "cma_task%d", cpu);
+ work = &per_cpu(cma_pcp_thread, cpu);
+ init_completion(&work->start);
+ init_completion(&work->end);
+ INIT_LIST_HEAD(&work->list);
+ spin_lock_init(&work->list_lock);
+ work->cpu = cpu;
+ task = kthread_create(cma_boost_work_func, work, task_name);
+ if (!IS_ERR(task)) {
+ kthread_bind(task, cpu);
+ set_user_nice(task, -17);
+ pr_debug("create cma task%p, for cpu %d\n", task, cpu);
+ wake_up_process(task);
+ } else {
+ can_boost = 0;
+ pr_err("create task for cpu %d fail:%p\n", cpu, task);
+ return -1;
+ }
+ }
+ can_boost = 1;
+ return 0;
+}
+module_init(init_cma_boost_task);
+
+int cma_alloc_contig_boost(unsigned long start_pfn, unsigned long count)
+{
+ struct cpumask has_work;
+ int cpu, cpus, i = 0, ret = 0, ebusy = 0, einv = 0;
+ atomic_t ok;
+ unsigned long delta;
+ unsigned long cnt;
+ unsigned long flags;
+ struct cma_pcp *work;
+ struct work_cma job[NR_CPUS] = {};
+
+ cpumask_clear(&has_work);
+
+ cpus = num_online_cpus();
+ cnt = count;
+ delta = count / cpus;
+ atomic_set(&ok, 0);
+ local_irq_save(flags);
+ for_each_online_cpu(cpu) {
+ work = &per_cpu(cma_pcp_thread, cpu);
+ spin_lock(&work->list_lock);
+ INIT_LIST_HEAD(&job[cpu].list);
+ job[cpu].pfn = start_pfn + i * delta;
+ job[cpu].count = delta;
+ job[cpu].ret = -1;
+ if (i == cpus - 1)
+ job[cpu].count = count - i * delta;
+ cpumask_set_cpu(cpu, &has_work);
+ list_add(&job[cpu].list, &work->list);
+ spin_unlock(&work->list_lock);
+ complete(&work->start);
+ i++;
+ }
+ local_irq_restore(flags);
+
+ for_each_cpu(cpu, &has_work) {
+ work = &per_cpu(cma_pcp_thread, cpu);
+ wait_for_completion(&work->end);
+ if (job[cpu].ret) {
+ if (job[cpu].ret != -EBUSY)
+ einv++;
+ else
+ ebusy++;
+ }
+ }
+
+ if (einv)
+ ret = -EINVAL;
+ else if (ebusy)
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ if (ret < 0 && ret != -EBUSY) {
+ pr_err("%s, failed, ret:%d, ok:%d\n",
+ __func__, ret, atomic_read(&ok));
+ }
+
+ return ret;
+}
+
+/*
+ * Some of these functions are implemented from page_isolate.c
+ */
+static bool can_free_list_page(struct page *page, struct list_head *list)
+{
+#if 0
+ unsigned long flags;
+ bool ret = false;
+
+ if (!spin_trylock_irqsave(&cma_iso_lock, flags))
+ return ret;
+
+ if (!(page->flags & PAGE_FLAGS_CHECK_AT_FREE) &&
+ !PageSwapBacked(page) &&
+ (page->lru.next != LIST_POISON1)) {
+ if (list_empty(&page->lru))
+ list_add(&page->lru, list);
+ else
+ list_move(&page->lru, list);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&cma_iso_lock, flags);
+ return ret;
+#else
+ return false;
+#endif
+}
+
+static int __aml_check_pageblock_isolate(unsigned long pfn,
+ unsigned long end_pfn,
+ bool skip_hwpoisoned_pages,
+ struct list_head *list)
+{
+ struct page *page;
+
+ while (pfn < end_pfn) {
+ if (!pfn_valid_within(pfn)) {
+ pfn++;
+ continue;
+ }
+ page = pfn_to_page(pfn);
+ if (PageBuddy(page)) {
+ /*
+ * If the page is on a free list, it has to be on
+ * the correct MIGRATE_ISOLATE freelist. There is no
+ * simple way to verify that as VM_BUG_ON(), though.
+ */
+ pfn += 1 << page_private(page);
+ } else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
+ /*
+ * The HWPoisoned page may be not in buddy
+ * system, and page_count() is not 0.
+ */
+ pfn++;
+ } else {
+ /* This page can be freed ? */
+ if (!page_count(page)) {
+ if (can_free_list_page(page, list)) {
+ pfn++;
+ continue;
+ }
+ }
+ break;
+ }
+ }
+ return pfn;
+}
+
+static inline struct page *
+check_page_valid(unsigned long pfn, unsigned long nr_pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; i++)
+ if (pfn_valid_within(pfn + i))
+ break;
+ if (unlikely(i == nr_pages))
+ return NULL;
+ return pfn_to_page(pfn + i);
+}
+
+int aml_check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+ bool skip_hwpoisoned_pages)
+{
+ unsigned long pfn, flags;
+ struct page *page;
+ struct zone *zone;
+ struct list_head free_list;
+
+ /*
+ * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
+ * are not aligned to pageblock_nr_pages.
+ * Then we just check migratetype first.
+ */
+ for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+ page = check_page_valid(pfn, pageblock_nr_pages);
+ if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+ break;
+ }
+ page = check_page_valid(start_pfn, end_pfn - start_pfn);
+ if ((pfn < end_pfn) || !page)
+ return -EBUSY;
+ /* Check all pages are free or marked as ISOLATED */
+ zone = page_zone(page);
+ INIT_LIST_HEAD(&free_list);
+ spin_lock_irqsave(&zone->lock, flags);
+ pfn = __aml_check_pageblock_isolate(start_pfn, end_pfn,
+ skip_hwpoisoned_pages,
+ &free_list);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ trace_test_pages_isolated(start_pfn, end_pfn, pfn);
+
+ free_hot_cold_page_list(&free_list, 1);
+ /* page may in kswap ? */
+ if (pfn < end_pfn && zone->zone_pgdat)
+ wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
+
+ return pfn < end_pfn ? -EBUSY : 0;
+}
+
+
+int aml_cma_alloc_range(unsigned long start, unsigned long end)
+{
+ unsigned long outer_start, outer_end;
+ int ret = 0, order;
+ int try_times = 0;
+ int boost_ok = 0;
+
+ struct compact_control cc = {
+ .nr_migratepages = 0,
+ .order = -1,
+ .zone = page_zone(pfn_to_page(start)),
+ .mode = MIGRATE_SYNC,
+ .page_type = COMPACT_CMA,
+ .ignore_skip_hint = true,
+ };
+ INIT_LIST_HEAD(&cc.migratepages);
+
+ ret = start_isolate_page_range(get_align_pfn_low(start),
+ get_align_pfn_high(end), MIGRATE_CMA,
+ false);
+ if (ret)
+ return ret;
+
+try_again:
+ /*
+ * try to use more cpu to do this job when alloc count is large
+ */
+ if ((num_online_cpus() > 1) && can_boost &&
+ ((end - start) >= pageblock_nr_pages / 2)) {
+ get_online_cpus();
+ ret = cma_alloc_contig_boost(start, end - start);
+ put_online_cpus();
+ boost_ok = !ret ? 1 : 0;
+ } else
+ ret = aml_alloc_contig_migrate_range(&cc, start, end, 0);
+
+ if (ret && ret != -EBUSY)
+ goto done;
+
+ ret = 0;
+ if (!boost_ok) {
+ lru_add_drain_all();
+ drain_all_pages(cc.zone);
+ }
+ order = 0;
+ outer_start = start;
+ while (!PageBuddy(pfn_to_page(outer_start))) {
+ if (++order >= MAX_ORDER) {
+ ret = -EBUSY;
+ try_times++;
+ if (try_times < 10)
+ goto try_again;
+ goto done;
+ }
+ outer_start &= ~0UL << order;
+ }
+
+ if (outer_start != start) {
+ order = page_private(pfn_to_page(outer_start)); /* page order */
+
+ /*
+ * outer_start page could be small order buddy page and
+ * it doesn't include start page. Adjust outer_start
+ * in this case to report failed page properly
+ * on tracepoint in test_pages_isolated()
+ */
+ if (outer_start + (1UL << order) <= start)
+ outer_start = start;
+ }
+
+ /* Make sure the range is really isolated. */
+ if (aml_check_pages_isolated(outer_start, end, false)) {
+ pr_debug("%s check_pages_isolated(%lx, %lx) failed\n",
+ __func__, outer_start, end);
+ try_times++;
+ if (try_times < 10)
+ goto try_again;
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /* Grab isolated pages from freelists. */
+ outer_end = isolate_freepages_range(&cc, outer_start, end);
+ if (!outer_end) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /* Free head and tail (if any) */
+ if (start != outer_start)
+ aml_cma_free(outer_start, start - outer_start);
+ if (end != outer_end)
+ aml_cma_free(end, outer_end - end);
+
+done:
+ undo_isolate_page_range(get_align_pfn_low(start),
+ get_align_pfn_high(end), MIGRATE_CMA);
+ return ret;
+}
+EXPORT_SYMBOL(aml_cma_alloc_range);
+
+static int __aml_cma_free_check(struct page *page, int order, unsigned int *cnt)
+{
+ int i;
+ int ref = 0;
+
+ /*
+ * clear ref count, head page should avoid this operation.
+ * ref count of head page will be cleared when __free_pages
+ * is called.
+ */
+ for (i = 1; i < (1 << order); i++) {
+ if (!put_page_testzero(page + i))
+ ref++;
+ }
+ if (ref) {
+ pr_info("%s, %d pages are still in use\n", __func__, ref);
+ *cnt += ref;
+ return -1;
+ }
+ return 0;
+}
+
+static inline unsigned long
+__find_buddy_index(unsigned long page_idx, unsigned int order)
+{
+ return page_idx ^ (1 << order);
+}
+
+static int aml_cma_get_page_order(struct page *page, int cur_order)
+{
+ int order, ret = 0;
+ unsigned long page_idx, buddy_idx, combined_idx, flags;
+ struct page *buddy, *raw;
+ struct zone *zone;
+
+ /*
+ * same as __free_one_page, but we only need to find out freed buddy
+ * and it's order of it.
+ */
+ buddy = page;
+ raw = page;
+ page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
+ zone = page_zone(page);
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = cur_order; order < MAX_ORDER; order++) {
+ buddy_idx = __find_buddy_index(page_idx, order);
+ buddy = page + (buddy_idx - page_idx);
+ combined_idx = buddy_idx & page_idx;
+ page = page + (combined_idx - page_idx);
+ page_idx = combined_idx;
+ if (PageBuddy(buddy)) {
+ ret = page_private(buddy);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ WARN(ret >= MAX_ORDER, "%s page:%lx, buddy:%lx:%ld, corder:%d,ret:%d\n",
+ __func__, page_to_pfn(raw), page_to_pfn(buddy),
+ page_private(buddy), cur_order, ret);
+ return 0;
+}
+
+void aml_cma_free(unsigned long pfn, unsigned int nr_pages)
+{
+ unsigned int count = 0;
+ struct page *page;
+ int free_order, start_order = 0;
+ int batch;
+
+ while (nr_pages) {
+ page = pfn_to_page(pfn);
+ batch = (1 << start_order);
+ if (__aml_cma_free_check(page, start_order, &count))
+ break;
+
+ __free_pages(page, start_order);
+ free_order = aml_cma_get_page_order(page, start_order);
+ pr_debug("pages:%4d, free:%2d, start:%2d, batch:%4d, pfn:%lx\n",
+ nr_pages, free_order,
+ start_order, batch, pfn);
+ nr_pages -= batch;
+ pfn += batch;
+ /*
+ * since pages are contiguous and their buddy may already have a
+ * large order, we can try to free at the same order as free_order
+ * to speed up freeing.
+ */
+ if (free_order < 0) {
+ start_order = 0;
+ continue;
+ }
+ if (nr_pages >= (1 << free_order)) {
+ start_order = free_order;
+ } else {
+ /* remain pages is not enough */
+ start_order = 0;
+ while (nr_pages >= (1 << start_order))
+ start_order++;
+ start_order--;
+ }
+ }
+ WARN(count != 0, "%d pages are still in use!\n", count);
+}
+EXPORT_SYMBOL(aml_cma_free);
+
+static int __init aml_cma_init(void)
+{
+ atomic_set(&cma_allocate, 0);
+ atomic_long_set(&nr_cma_allocated, 0);
+
+ return 0;
+}
+arch_initcall(aml_cma_init);
diff --git a/drivers/amlogic/memory_ext/aml_slub_debug.c b/drivers/amlogic/memory_ext/aml_slub_debug.c
new file mode 100644
index 0000000..3d287e2
--- /dev/null
+++ b/drivers/amlogic/memory_ext/aml_slub_debug.c
@@ -0,0 +1,208 @@
+/*
+ * drivers/amlogic/memory_ext/aml_slub_debug.c
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/amlogic/page_trace.h>
+#include <linux/gfp.h>
+#include <linux/proc_fs.h>
+#include <linux/kallsyms.h>
+#include <linux/mmzone.h>
+#include <linux/memblock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#define ADBG pr_debug
+
+static void show_obj_trace_around(void *obj)
+{
+ struct page *page;
+ struct zone *zone;
+ long pfn, i, start_pfn, end_pfn;
+
+ pfn = virt_to_pfn(obj);
+ if (!pfn_valid(pfn)) {
+ pr_info("%s, invalid pfn:%lx, obj:%p\n", __func__, pfn, obj);
+ return;
+ }
+ page = pfn_to_page(pfn);
+ zone = page_zone(page);
+ start_pfn = zone->zone_start_pfn;
+ end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ for (i = pfn - 5; i < pfn + 5; i++) {
+ if (i < start_pfn)
+ i = start_pfn;
+ if (i >= end_pfn)
+ i = end_pfn - 1;
+ page = pfn_to_page(i);
+ pr_info("%s, obj:%p, page:%lx, allocator:%pf\n",
+ __func__, obj, i, (void *)get_page_trace(page));
+ }
+}
+
+static void dump_obj_trace(void *obj, struct kmem_cache *s)
+{
+ struct page *page;
+ unsigned long trace, addr;
+ int i, cnt, ip;
+ int order;
+
+ page = virt_to_head_page(obj);
+ order = s->max.x >> 16;
+ cnt = PAGE_SIZE * (1 << order) / s->size;
+ pr_info("%s, obj:%p, page:%lx, s:%s %d, order:%d, cnt:%d, s_mem:%p\n",
+ __func__, obj, page_to_pfn(page), s->name,
+ s->size, order, cnt, page->s_mem);
+ if (s->size >= PAGE_SIZE) {
+ pr_info("%s, slab:%s, addr:%lx, trace:%pf\n",
+ __func__, s->name, (unsigned long)obj, page->s_mem);
+ } else {
+ addr = (unsigned long)page_address(page);
+ if (page->s_mem) {
+ for (i = 0; i < cnt; i++) {
+ ip = ((int *)page->s_mem)[i];
+ trace = unpack_ip((struct page_trace *)&ip);
+ pr_info("%s, addr:%lx, i:%d, trace:%pf\n",
+ __func__, addr + i * s->size,
+ i, (void *)trace);
+ }
+ }
+ }
+}
+
+static int get_obj_index(void *obj, struct kmem_cache *s)
+{
+ struct page *page;
+ unsigned long diff;
+ int idx;
+
+ page = virt_to_head_page(obj);
+ diff = (unsigned long)obj - (unsigned long)page_address(page);
+ idx = diff / s->size;
+
+ ADBG("%s, obj:%p, page:%lx, diff:%lx, idx:%d, size:%d, s:%s\n",
+ __func__, obj, page_to_pfn(page), diff, idx, s->size, s->name);
+
+ return idx;
+}
+
+int aml_slub_check_object(struct kmem_cache *s, void *obj, void *new_obj)
+{
+ void *p;
+
+ if (new_obj && (unsigned long)new_obj < PAGE_OFFSET) {
+ p = obj + s->offset;
+ pr_info("%s s:%s, obj:%p, offset:%x, bad new object:%p\n",
+ __func__, s->name, obj, s->offset, obj);
+ show_obj_trace_around(obj);
+ dump_obj_trace(obj, s);
+ show_data((unsigned long)p - 256, 512, "obj");
+ dump_stack();
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int aml_check_kmemcache(struct kmem_cache_cpu *c, struct kmem_cache *s,
+ void *object)
+{
+ if (c && (unsigned long)c < PAGE_OFFSET) {
+ show_obj_trace_around(s);
+ pr_err("%s, bad cpu cache:%p, c:%s\n", __func__, c, s->name);
+ dump_stack();
+ return -EINVAL;
+ }
+
+ if (object && (unsigned long)object < PAGE_OFFSET) {
+ show_obj_trace_around(c);
+ pr_err("%s, bad object:%p, c:%p, c:%s\n",
+ __func__, object, c, s->name);
+ dump_stack();
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void aml_get_slub_trace(struct kmem_cache *s, struct page *page,
+ gfp_t flags, int order)
+{
+ int obj_count;
+ void *p;
+
+ if (!page)
+ return;
+
+ page->s_mem = NULL;
+ /* allocate trace data */
+ if (page && !(flags & __GFP_BDEV) && s->size < PAGE_SIZE) {
+ obj_count = PAGE_SIZE * (1 << order) / s->size;
+ ADBG("%s, s:%s, size:%d, osize:%d\n",
+ __func__, s->name, s->size, s->object_size);
+ ADBG("%s, order:%d, count:%d, flags:%x\n",
+ __func__, order, obj_count, flags);
+ p = kzalloc(obj_count * 4, GFP_KERNEL | __GFP_BDEV);
+ page->s_mem = p;
+ ADBG("%s, page:%lx, s_mem:%p\n",
+ __func__, page_to_pfn(page), page->s_mem);
+ }
+}
+
+void aml_put_slub_trace(struct page *page, struct kmem_cache *s)
+{
+ if (!page)
+ return;
+
+ if (page->s_mem && s->size < PAGE_SIZE) {
+ ADBG("%s, %d, free page:%lx, s_mem:%p\n",
+ __func__, __LINE__, page_to_pfn(page), page->s_mem);
+ kfree(page->s_mem);
+ }
+ page->s_mem = NULL;
+}
+
+void aml_slub_set_trace(struct kmem_cache *s, void *object)
+{
+ unsigned long trace;
+ unsigned int *p, ip, idx;
+ struct page *page;
+
+ if (!object)
+ return;
+ page = virt_to_head_page(object);
+ trace = find_back_trace();
+ if (s->size >= PAGE_SIZE) {
+ page->s_mem = (void *)trace;
+ ADBG("%s, page:%lx, trace:%pf, s:%s, size:%d\n",
+ __func__, page_to_pfn(page),
+ page->s_mem, s->name, s->size);
+ } else {
+ ip = pack_ip(trace, 0, 0);
+ idx = get_obj_index(object, s);
+ if (page->s_mem) {
+ p = page->s_mem;
+ p[idx] = ip;
+ ADBG("%s, page:%lx, trace:%pf\n",
+ __func__, page_to_pfn(page), (void *)trace);
+ ADBG("s:%s, size:%d, s_mem:%p, idx:%d, ip:%x\n",
+ s->name, s->size, page->s_mem, idx, ip);
+ }
+ }
+}
diff --git a/drivers/amlogic/memory_ext/page_trace.c b/drivers/amlogic/memory_ext/page_trace.c
index a2fa3df..95be8ea 100644
--- a/drivers/amlogic/memory_ext/page_trace.c
+++ b/drivers/amlogic/memory_ext/page_trace.c
@@ -47,6 +47,7 @@
*
*/
static bool merge_function = 1;
+static int page_trace_filter = 64; /* not print size < page_trace_filter */
unsigned int cma_alloc_trace;
static struct proc_dir_entry *dentry;
#ifndef CONFIG_64BIT
@@ -80,6 +81,7 @@ static struct fun_symbol common_func[] __initdata = {
{"__kmalloc", 1},
{"cma_alloc", 1},
{"dma_alloc_from_contiguous", 1},
+ {"aml_cma_alloc_post_hook", 1},
{"__dma_alloc", 1},
{"__kmalloc_track_caller", 1},
{"kmem_cache_alloc_trace", 1},
@@ -396,11 +398,11 @@ static void __init find_static_common_symbol(void)
static int is_common_caller(struct alloc_caller *caller, unsigned long pc)
{
- int ret = 0, cnt = 0;
+ int ret = 0;
int low = 0, high = COMMON_CALLER_SIZE - 1, mid;
unsigned long add_l, add_h;
- while (low < high) {
+ while (1) {
mid = (high + low) / 2;
add_l = caller[mid].func_start_addr;
add_h = caller[mid].func_start_addr + caller[mid].size;
@@ -408,16 +410,20 @@ static int is_common_caller(struct alloc_caller *caller, unsigned long pc)
ret = 1;
break;
}
- if (pc < add_l) { /* caller is desending order */
- if (mid == (low + 1))
- break;
- low = mid - 1;
- } else {
- if (mid == (high - 1))
- break;
- high = mid + 1;
- }
- cnt++;
+
+ if (low >= high) /* still not match */
+ break;
+
+ if (pc < add_l) /* caller is desending order */
+ low = mid + 1;
+ else
+ high = mid - 1;
+
+ /* fix range */
+ if (high < 0)
+ high = 0;
+ if (low > (COMMON_CALLER_SIZE - 1))
+ low = COMMON_CALLER_SIZE - 1;
}
return ret;
}
@@ -550,10 +556,14 @@ unsigned int pack_ip(unsigned long ip, int order, gfp_t flag)
trace.ret_ip = (ip - text) >> 2;
WARN_ON(trace.ret_ip > IP_RANGE_MASK);
+#ifdef CONFIG_AMLOGIC_CMA
if (flag == __GFP_BDEV)
trace.migrate_type = MIGRATE_CMA;
else
trace.migrate_type = gfpflags_to_migratetype(flag);
+#else
+ trace.migrate_type = gfpflags_to_migratetype(flag);
+#endif /* CONFIG_AMLOGIC_CMA */
trace.order = order;
#if DEBUG_PAGE_TRACE
pr_debug("%s, base:%p, page:%lx, _ip:%x, o:%d, f:%x, ip:%lx\n",
@@ -684,12 +694,41 @@ static int __init page_trace_pre_work(unsigned long size)
}
#endif
-#define SHOW_CNT 1024
+/*--------------------------sysfs node -------------------------------*/
+#define LARGE 512
+#define SMALL 128
+
+/* callers for unmovable are the most numerous */
+#define MT_UNMOVABLE_IDX 0 /* 0,UNMOVABLE */
+#define MT_MOVABLE_IDX (MT_UNMOVABLE_IDX + LARGE) /* 1,MOVABLE */
+#define MT_RECLAIMABLE_IDX (MT_MOVABLE_IDX + SMALL) /* 2,RECLAIMABLE */
+#define MT_HIGHATOMIC_IDX (MT_RECLAIMABLE_IDX + SMALL) /* 3,HIGHATOMIC */
+#define MT_CMA_IDX (MT_HIGHATOMIC_IDX + SMALL) /* 4,CMA */
+#define MT_ISOLATE_IDX (MT_CMA_IDX + SMALL) /* 5,ISOLATE */
+
+#define SHOW_CNT (MT_ISOLATE_IDX)
+
+static int mt_offset[] = {
+ MT_UNMOVABLE_IDX,
+ MT_MOVABLE_IDX,
+ MT_RECLAIMABLE_IDX,
+ MT_HIGHATOMIC_IDX,
+ MT_CMA_IDX,
+ MT_ISOLATE_IDX,
+ MT_ISOLATE_IDX + SMALL
+};
+
struct page_summary {
unsigned long ip;
unsigned int cnt;
};
+struct pagetrace_summary {
+ struct page_summary *sum;
+ unsigned long ticks;
+ int mt_cnt[MIGRATE_TYPES];
+};
+
static unsigned long find_ip_base(unsigned long ip)
{
unsigned long size, offset;
@@ -704,7 +743,8 @@ static unsigned long find_ip_base(unsigned long ip)
}
static int find_page_ip(struct page_trace *trace,
- struct page_summary *sum, int *o)
+ struct page_summary *sum, int *o,
+ int range, int *mt_cnt)
{
int i = 0;
int order;
@@ -713,9 +753,7 @@ static int find_page_ip(struct page_trace *trace,
*o = 0;
ip = unpack_ip(trace);
order = trace->order;
- if (merge_function)
- ip = find_ip_base(ip);
- for (i = 0; i < SHOW_CNT; i++) {
+ for (i = 0; i < range; i++) {
if (sum[i].ip == ip) {
/* find */
sum[i].cnt += (1 << order);
@@ -726,10 +764,11 @@ static int find_page_ip(struct page_trace *trace,
sum[i].cnt += (1 << order);
sum[i].ip = ip;
*o = order;
- return 1;
+ mt_cnt[trace->migrate_type]++;
+ return 0;
}
}
- return 0;
+ return -ERANGE;
}
#define K(x) ((x) << (PAGE_SHIFT - 10))
@@ -743,40 +782,81 @@ static int trace_cmp(const void *x1, const void *x2)
}
static void show_page_trace(struct seq_file *m,
- struct page_summary *sum, int cnt, int type)
+ struct page_summary *sum, int *mt_cnt)
{
- int i;
- unsigned long total = 0;
+ int i, j;
+ struct page_summary *p;
+ unsigned long total;
- if (!cnt)
- return;
- sort(sum, cnt, sizeof(*sum), trace_cmp, NULL);
- for (i = 0; i < cnt; i++) {
- seq_printf(m, "%8d, %16lx, %pf\n",
- K(sum[i].cnt), sum[i].ip, (void *)sum[i].ip);
- total += sum[i].cnt;
- }
+ seq_printf(m, "%s %s %s\n",
+ "count(KB)", "kaddr", "function");
seq_puts(m, "------------------------------\n");
- seq_printf(m, "total pages:%ld, %ld kB, type:%s\n",
- total, K(total), migratetype_names[type]);
+ for (j = 0; j < MIGRATE_TYPES; j++) {
+
+ if (!mt_cnt[j]) /* this migrate type is empty */
+ continue;
+
+ p = sum + mt_offset[j];
+ sort(p, mt_cnt[j], sizeof(*p), trace_cmp, NULL);
+
+ total = 0;
+ for (i = 0; i < mt_cnt[j]; i++) {
+ if (!p[i].cnt) /* may be empty after merge */
+ continue;
+
+ if (K(p[i].cnt) >= page_trace_filter) {
+ seq_printf(m, "%8d, %16lx, %pf\n",
+ K(p[i].cnt), p[i].ip,
+ (void *)p[i].ip);
+ }
+ total += p[i].cnt;
+ }
+ seq_puts(m, "------------------------------\n");
+ seq_printf(m, "total pages:%ld, %ld kB, type:%s\n",
+ total, K(total), migratetype_names[j]);
+ seq_puts(m, "------------------------------\n");
+ }
}
-static inline int type_match(struct page_trace *trace, int type)
+static void merge_same_function(struct page_summary *sum, int *mt_cnt)
{
- return (trace->migrate_type) == type;
+ int i, j, k, range;
+ struct page_summary *p;
+
+ for (i = 0; i < MIGRATE_TYPES; i++) {
+ range = mt_cnt[i];
+ p = sum + mt_offset[i];
+
+ /* first, replace all ip to entry of each function */
+ for (j = 0; j < range; j++)
+ p[j].ip = find_ip_base(p[j].ip);
+
+ /* second, loop and merge same ip */
+ for (j = 0; j < range; j++) {
+ for (k = j + 1; k < range; k++) {
+ if (p[k].ip != (-1ul) &&
+ p[k].ip == p[j].ip) {
+ p[j].cnt += p[k].cnt;
+ p[k].ip = (-1ul);
+ p[k].cnt = 0;
+ }
+ }
+ }
+ }
}
static int update_page_trace(struct seq_file *m, struct zone *zone,
- struct page_summary *sum, int type)
+ struct page_summary *sum, int *mt_cnt)
{
unsigned long pfn;
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(zone);
- int max_trace = 0, ret;
+ int ret = 0, mt;
int order;
- unsigned long ip;
struct page_trace *trace;
+ struct page_summary *p;
+ /* loop whole zone */
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
struct page *page;
@@ -799,30 +879,29 @@ static int update_page_trace(struct seq_file *m, struct zone *zone,
if (!(*(unsigned int *)trace)) /* empty */
continue;
- if (type_match(trace, type)) {
- ret = find_page_ip(trace, sum, &order);
- if (max_trace == SHOW_CNT && ret) {
- ip = unpack_ip(trace);
- pr_err("MAX sum cnt, pfn:%ld, lr:%lx, %pf\n",
- pfn, ip, (void *)ip);
- } else
- max_trace += ret;
- if (order)
- pfn += ((1 << order) - 1);
+ mt = trace->migrate_type;
+ p = sum + mt_offset[mt];
+ ret = find_page_ip(trace, p, &order,
+ mt_offset[mt + 1] - mt_offset[mt], mt_cnt);
+ if (ret) {
+ pr_err("mt type:%d, out of range:%d\n",
+ mt, mt_offset[mt + 1] - mt_offset[mt]);
+ break;
}
+ if (order)
+ pfn += ((1 << order) - 1);
}
- return max_trace;
+ if (merge_function)
+ merge_same_function(sum, mt_cnt);
+ return ret;
}
-/*
- * This prints out statistics in relation to grouping pages by mobility.
- * It is expensive to collect so do not constantly read the file.
- */
+
static int pagetrace_show(struct seq_file *m, void *arg)
{
pg_data_t *p = (pg_data_t *)arg;
struct zone *zone;
- int mtype, ret, print_flag;
- struct page_summary *sum;
+ int ret, size = sizeof(struct page_summary) * SHOW_CNT;
+ struct pagetrace_summary *sum;
#ifndef CONFIG_64BIT
if (!trace_buffer) {
@@ -835,29 +914,41 @@ static int pagetrace_show(struct seq_file *m, void *arg)
if (!node_state(p->node_id, N_MEMORY))
return 0;
- sum = vmalloc(sizeof(struct page_summary) * SHOW_CNT);
- if (!sum)
- return -ENOMEM;
+ if (!m->private) {
+ sum = kzalloc(sizeof(*sum), GFP_KERNEL);
+ if (!sum)
+ return -ENOMEM;
+
+ m->private = sum;
+ sum->sum = vzalloc(size);
+ if (!sum->sum) {
+ kfree(sum);
+ m->private = NULL;
+ return -ENOMEM;
+ }
- for_each_populated_zone(zone) {
- print_flag = 0;
- seq_printf(m, "Node %d, zone %8s\n", p->node_id, zone->name);
- for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
- memset(sum, 0, sizeof(struct page_summary) * SHOW_CNT);
- ret = update_page_trace(m, zone, sum, mtype);
- if (ret > 0) {
- seq_printf(m, "%s %s %s\n",
- "count(KB)", "kaddr", "function");
- seq_puts(m, "------------------------------\n");
- show_page_trace(m, sum, ret, mtype);
- seq_puts(m, "\n");
- print_flag = 1;
+ /* update only once */
+ sum->ticks = sched_clock();
+ for_each_populated_zone(zone) {
+ memset(sum->sum, 0, size);
+ ret = update_page_trace(m, zone, sum->sum, sum->mt_cnt);
+ if (ret) {
+ seq_printf(m, "Error %d in Node %d, zone %8s\n",
+ ret, p->node_id, zone->name);
+ continue;
}
}
- if (print_flag)
- seq_puts(m, "------------------------------\n");
+ sum->ticks = sched_clock() - sum->ticks;
+ }
+
+ sum = (struct pagetrace_summary *)m->private;
+ for_each_populated_zone(zone) {
+ seq_printf(m, "Node %d, zone %8s\n", p->node_id, zone->name);
+ show_page_trace(m, sum->sum, sum->mt_cnt);
}
- vfree(sum);
+ seq_printf(m, "SHOW_CNT:%d, buffer size:%d, tick:%ld ns\n",
+ SHOW_CNT, size, sum->ticks);
+
return 0;
}
@@ -879,7 +970,9 @@ static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
pg_data_t *pgdat = (pg_data_t *)arg;
(*pos)++;
- return next_online_pgdat(pgdat);
+ pgdat = next_online_pgdat(pgdat);
+
+ return pgdat;
}
static void frag_stop(struct seq_file *m, void *arg)
@@ -897,6 +990,21 @@ static int pagetrace_open(struct inode *inode, struct file *file)
return seq_open(file, &pagetrace_op);
}
+static int pagetrace_release(struct inode *inode, struct file *file)
+{
+ struct pagetrace_summary *sum;
+ struct seq_file *m = file->private_data;
+
+ if (m->private) {
+ sum = (struct pagetrace_summary *)m->private;
+ if (sum->sum)
+ vfree(sum->sum);
+ kfree(sum);
+ }
+
+ return seq_release(inode, file);
+}
+
static ssize_t pagetrace_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -929,6 +1037,14 @@ static ssize_t pagetrace_write(struct file *file, const char __user *buffer,
cma_alloc_trace = arg ? 1 : 0;
pr_info("set cma_trace to %d\n", cma_alloc_trace);
}
+ if (!strncmp(buf, "filter=", 7)) { /* option for 'filter=' */
+ if (sscanf(buf, "filter=%ld", &arg) < 0) {
+ kfree(buf);
+ return -EINVAL;
+ }
+ page_trace_filter = arg;
+ pr_info("set filter to %d KB\n", page_trace_filter);
+ }
kfree(buf);
@@ -940,7 +1056,7 @@ static const struct file_operations pagetrace_file_ops = {
.read = seq_read,
.llseek = seq_lseek,
.write = pagetrace_write,
- .release = seq_release,
+ .release = pagetrace_release,
};
static int __init page_trace_module_init(void)
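The new "filter=" handler in pagetrace_write() above can also be driven from C instead of the shell echo shown in the commit message. A minimal sketch, assuming the patched kernel exposes /proc/pagetrace as in this diff (error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Suppress /proc/pagetrace entries below 512 KB, matching
         * "echo filter=512 > /proc/pagetrace". */
        const char cmd[] = "filter=512";
        int fd = open("/proc/pagetrace", O_WRONLY);

        if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
                perror("pagetrace");
        if (fd >= 0)
                close(fd);
        return 0;
}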
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 1810f76..a5fbdba 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -26,6 +26,10 @@
#define MMC_QUEUE_BOUNCESZ 65536
+#ifdef CONFIG_AMLOGIC_CMA
+#include <linux/sched.h>
+#endif /* CONFIG_AMLOGIC_CMA */
+
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
@@ -62,6 +66,10 @@ static int mmc_queue_thread(void *d)
current->flags |= PF_MEMALLOC;
+#ifdef CONFIG_AMLOGIC_CMA
+ set_user_nice(current, -15);
+#endif /* CONFIG_AMLOGIC_CMA */
+
down(&mq->thread_sem);
do {
struct request *req = NULL;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 687be36..8774de0 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -43,6 +43,10 @@
#include <linux/profile.h>
#include <linux/notifier.h>
+#ifdef CONFIG_AMLOGIC_CMA
+#include <linux/amlogic/aml_cma.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include "trace/lowmemorykiller.h"
@@ -81,6 +85,47 @@ static unsigned long lowmem_count(struct shrinker *s,
global_node_page_state(NR_INACTIVE_FILE);
}
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+static unsigned long forgeround_jiffes;
+static void show_task_adj(void)
+{
+#define SHOW_PRIFIX "score_adj:%5d, rss:%5lu"
+ struct task_struct *tsk;
+ int tasksize;
+
+ /* avoid print too many */
+ if (time_after(forgeround_jiffes, jiffies))
+ return;
+
+ forgeround_jiffes = jiffies + HZ * 5;
+ show_mem(0);
+ lowmem_print(1, "Foreground task killed, show all Candidates\n");
+ for_each_process(tsk) {
+ struct task_struct *p;
+ short oom_score_adj;
+
+ if (tsk->flags & PF_KTHREAD)
+ continue;
+ p = find_lock_task_mm(tsk);
+ if (!p)
+ continue;
+ oom_score_adj = p->signal->oom_score_adj;
+ tasksize = get_mm_rss(p->mm);
+ task_unlock(p);
+ #ifdef CONFIG_ZRAM
+ lowmem_print(1, SHOW_PRIFIX ", rswap:%5lu, task:%5d, %s\n",
+ oom_score_adj, get_mm_rss(p->mm),
+ get_mm_counter(p->mm, MM_SWAPENTS),
+ p->pid, p->comm);
+ #else
+ lowmem_print(1, SHOW_PRIFIX ", task:%5d, %s\n",
+ oom_score_adj, get_mm_rss(p->mm),
+ p->pid, p->comm);
+ #endif /* CONFIG_ZRAM */
+ }
+}
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
+
static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
@@ -98,6 +143,20 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
global_node_page_state(NR_SHMEM) -
global_node_page_state(NR_UNEVICTABLE) -
total_swapcache_pages();
+#ifdef CONFIG_AMLOGIC_CMA
+ int free_cma = 0;
+ int file_cma = 0;
+ int cma_forbid = 0;
+
+ if (cma_forbidden_mask(sc->gfp_mask)) {
+ free_cma = global_page_state(NR_FREE_CMA_PAGES);
+ file_cma = global_page_state(NR_INACTIVE_FILE_CMA) +
+ global_page_state(NR_ACTIVE_FILE_CMA);
+ other_free -= free_cma;
+ other_file -= file_cma;
+ cma_forbid = 1;
+ }
+#endif /* CONFIG_AMLOGIC_CMA */
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -185,8 +244,24 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
cache_size, cache_limit,
min_score_adj,
free);
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (cma_forbid) {
+ /* kill quickly if can't use cma */
+ lowmem_deathpending_timeout = jiffies + HZ / 2;
+ pr_info(" Free cma:%ldkB, file cma:%ldkB\n",
+ free_cma * (long)(PAGE_SIZE / 1024),
+ file_cma * (long)(PAGE_SIZE / 1024));
+ } else {
+ lowmem_deathpending_timeout = jiffies + HZ;
+ }
+ #else
lowmem_deathpending_timeout = jiffies + HZ;
+ #endif /* CONFIG_AMLOGIC_CMA */
rem += selected_tasksize;
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+ if (!selected_oom_score_adj) /* forgeround task killed */
+ show_task_adj();
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
}
lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c3d0f70..348aa00 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -722,11 +722,11 @@ struct block_device *bdget(dev_t dev)
inode->i_rdev = dev;
inode->i_bdev = bdev;
inode->i_data.a_ops = &def_blk_aops;
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_CMA
mapping_set_gfp_mask(&inode->i_data, GFP_USER | __GFP_BDEV);
#else
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
spin_lock(&bdev_lock);
list_add(&bdev->bd_list, &all_bdevs);
spin_unlock(&bdev_lock);
diff --git a/include/linux/amlogic/aml_cma.h b/include/linux/amlogic/aml_cma.h
new file mode 100644
index 0000000..947fd0ae
--- /dev/null
+++ b/include/linux/amlogic/aml_cma.h
@@ -0,0 +1,93 @@
+/*
+ * include/linux/amlogic/aml_cma.h
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __AMLOGIC_CMA_H__
+#define __AMLOGIC_CMA_H__
+
+#include <linux/migrate_mode.h>
+#include <linux/pagemap.h>
+
+enum migrate_type {
+ COMPACT_NORMAL,
+ COMPACT_CMA,
+ COMPACT_TO_CMA,
+};
+
+#define __GFP_NO_CMA (__GFP_BDEV | __GFP_WRITE)
+
+/* copy from mm/internal.h, must keep same as it */
+struct compact_control {
+ struct list_head freepages; /* List of free pages to migrate to */
+ struct list_head migratepages; /* List of pages being migrated */
+ unsigned long nr_freepages; /* Number of isolated free pages */
+ unsigned long nr_migratepages; /* Number of pages to migrate */
+ unsigned long free_pfn; /* isolate_freepages search base */
+ unsigned long migrate_pfn; /* isolate_migratepages search base */
+ unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
+ enum migrate_mode mode; /* Async or sync migration mode */
+ enum migrate_type page_type;
+ bool ignore_skip_hint; /* Scan blocks even if marked skip */
+ bool ignore_block_suitable; /* Scan blocks considered unsuitable */
+ bool direct_compaction; /* False from kcompactd or /proc/... */
+ bool whole_zone; /* Whole zone should/has been scanned */
+ int order; /* order a direct compactor needs */
+ const gfp_t gfp_mask; /* gfp mask of a direct compactor */
+ const unsigned int alloc_flags; /* alloc flags of a direct compactor */
+ const int classzone_idx; /* zone index of a direct compactor */
+ struct zone *zone;
+ bool contended; /* Signal lock or sched contention */
+};
+
+static inline bool cma_forbidden_mask(gfp_t gfp_flags)
+{
+ if ((gfp_flags & __GFP_NO_CMA) || !(gfp_flags & __GFP_MOVABLE))
+ return true;
+ return false;
+}
+
+extern void cma_page_count_update(long size);
+extern void aml_cma_alloc_pre_hook(int *a, int b);
+extern void aml_cma_alloc_post_hook(int *a, int b, struct page *p);
+extern void aml_cma_release_hook(int a, struct page *p);
+extern struct page *get_cma_page(struct zone *zone, unsigned int order);
+extern unsigned long compact_to_free_cma(struct zone *zone);
+extern int cma_alloc_ref(void);
+extern bool can_use_cma(gfp_t gfp_flags);
+extern void get_cma_alloc_ref(void);
+extern void put_cma_alloc_ref(void);
+extern bool cma_page(struct page *page);
+extern unsigned long get_cma_allocated(void);
+extern unsigned long get_total_cmapages(void);
+extern spinlock_t cma_iso_lock;
+extern int aml_cma_alloc_range(unsigned long start, unsigned long end);
+
+extern void aml_cma_free(unsigned long pfn, unsigned int nr_pages);
+
+extern unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+ struct list_head *page_list);
+
+unsigned long
+isolate_freepages_range(struct compact_control *cc,
+ unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct compact_control *cc,
+ unsigned long low_pfn, unsigned long end_pfn);
+
+struct page *compaction_cma_alloc(struct page *migratepage,
+ unsigned long data,
+ int **result);
+#endif /* __AMLOGIC_CMA_H__ */
diff --git a/include/linux/amlogic/page_trace.h b/include/linux/amlogic/page_trace.h
index 83c6fde..b3e93f8 100644
--- a/include/linux/amlogic/page_trace.h
+++ b/include/linux/amlogic/page_trace.h
@@ -56,7 +56,7 @@ struct page;
/* this struct should not larger than 32 bit */
struct page_trace {
- unsigned int ret_ip :24;
+ unsigned int ret_ip :24;
unsigned int migrate_type : 3;
unsigned int module_flag : 1;
unsigned int order : 4;
diff --git a/include/linux/cma.h b/include/linux/cma.h
index c73a16d..29f9e77 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -28,9 +28,4 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
-
-#ifdef CONFIG_AMLOGIC_MODIFY
-extern bool cma_suitable(gfp_t gfp_mask);
-extern unsigned long get_driver_alloc_cma(void);
-#endif /* CONFIG_AMLOGIC_MODIFY */
#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1687800..476646b 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -41,9 +41,9 @@ struct vm_area_struct;
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
#define ___GFP_BDEV 0x4000000u
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -188,16 +188,16 @@ struct vm_area_struct;
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
#define __GFP_BDEV ((__force gfp_t)___GFP_BDEV)
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
/* Room for N __GFP_FOO bits */
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
#define __GFP_BITS_SHIFT 27
#else
#define __GFP_BITS_SHIFT 26
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 2b91387..8fc56ac 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -3,9 +3,9 @@
#include <linux/huge_mm.h>
#include <linux/swap.h>
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
#include <linux/page-isolation.h>
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
@@ -49,41 +49,46 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
int nr_pages = hpage_nr_pages(page);
int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
int migrate_type = 0;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
- list_add(&page->lru, &lruvec->lists[lru]);
-
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
migrate_type = get_pageblock_migratetype(page);
- if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
+ if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type)) {
__mod_zone_page_state(page_zone(page),
NR_LRU_BASE + lru + num, nr_pages);
-#endif /* CONFIG_AMLOGIC_MODIFY */
+ list_add_tail(&page->lru, lruvec->cma_list[lru]);
+ /* Always to point to first cma page */
+ lruvec->cma_list[lru] = &page->lru;
+ } else
+ list_add(&page->lru, &lruvec->lists[lru]);
+#else
+ list_add(&page->lru, &lruvec->lists[lru]);
+#endif /* CONFIG_AMLOGIC_CMA */
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
int nr_pages = hpage_nr_pages(page);
int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
int migrate_type = 0;
-#endif /* CONFIG_AMLOGIC_MODIFY */
- list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
-
-#ifdef CONFIG_AMLOGIC_MODIFY
migrate_type = get_pageblock_migratetype(page);
- if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
+ if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type)) {
__mod_zone_page_state(page_zone(page),
NR_LRU_BASE + lru + num, -nr_pages);
-#endif /* CONFIG_AMLOGIC_MODIFY */
+ if (lruvec->cma_list[lru] == &page->lru)
+ lruvec->cma_list[lru] = page->lru.next;
+ }
+#endif /* CONFIG_AMLOGIC_CMA */
+ list_del(&page->lru);
+ update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}
/**
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1bbbe0c..c30d800 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -137,23 +137,26 @@ enum zone_stat_item {
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
#endif
-#ifdef CONFIG_AMLOGIC_MODIFY /* get free pages according migrate type */
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
NR_FREE_UNMOVABLE,
NR_FREE_MOVABLE,
NR_FREE_RECLAIMABLE,
NR_FREE_HIGHATOMIC,
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
NR_FREE_CMA_PAGES,
-#ifdef CONFIG_AMLOGIC_MODIFY
- /* This is in order with MIGRATE_TYPES */
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+#ifdef CONFIG_MEMORY_ISOLATION
NR_FREE_ISOLATE,
+#endif
+#ifdef CONFIG_AMLOGIC_CMA
NR_INACTIVE_ANON_CMA, /* must match order of LRU_[IN]ACTIVE */
NR_ACTIVE_ANON_CMA,
NR_INACTIVE_FILE_CMA,
NR_ACTIVE_FILE_CMA,
NR_UNEVICTABLE_FILE_CMA,
NR_CMA_ISOLATED, /* cma isolate */
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
NR_VM_ZONE_STAT_ITEMS };
enum node_stat_item {
@@ -240,6 +243,9 @@ struct zone_reclaim_stat {
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_AMLOGIC_CMA
+ struct list_head *cma_list[NR_LRU_LISTS];
+#endif /* CONFIG_AMLOGIC_CMA */
/* Evictions & activations on the inactive file list */
atomic_long_t inactive_age;
#ifdef CONFIG_MEMCG
@@ -247,6 +253,17 @@ struct lruvec {
#endif
};
+#ifdef CONFIG_AMLOGIC_CMA
+static inline bool lru_normal_empty(enum lru_list lru, struct lruvec *lruv)
+{
+ if (lruv->lists[lru].next == lruv->cma_list[lru])
+ return true;
+ else
+ return false;
+}
+#endif /* CONFIG_AMLOGIC_CMA */
+
+
/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
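The lruvec changes above (add_page_to_lru_list()/del_page_from_lru_list() in mm_inline.h plus the cma_list member and lru_normal_empty() here) implement step 3f of the commit message: normal pages stay at the LRU head, while CMA pages are parked behind a per-list cma_list marker that always points at the most recently added CMA page. A standalone toy sketch of that bookkeeping (a miniature list implementation for illustration, not kernel code) shows the resulting ordering:

#include <stdio.h>

/* Toy doubly-linked list with list_add()/list_add_tail() semantics. */
struct list_head { struct list_head *prev, *next; };

static void list_add(struct list_head *n, struct list_head *h)
{                                       /* insert right after h */
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{                                       /* insert right before h */
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

struct page { struct list_head lru; int is_cma; int id; };

int main(void)
{
        struct list_head lru = { &lru, &lru };  /* lruvec->lists[lru]    */
        struct list_head *cma_list = &lru;      /* lruvec->cma_list[lru] */
        struct page pages[4] = {
                { .is_cma = 1, .id = 0 }, { .is_cma = 0, .id = 1 },
                { .is_cma = 1, .id = 2 }, { .is_cma = 0, .id = 3 },
        };
        struct list_head *pos;
        int i;

        for (i = 0; i < 4; i++) {
                if (pages[i].is_cma) {
                        /* CMA page: queue it at the marker, then make it
                         * the new "first CMA page" */
                        list_add_tail(&pages[i].lru, cma_list);
                        cma_list = &pages[i].lru;
                } else {
                        /* normal page: ordinary add at the LRU head */
                        list_add(&pages[i].lru, &lru);
                }
        }

        /* normal pages print first, CMA pages stay grouped at the tail */
        for (pos = lru.next; pos != &lru; pos = pos->next) {
                struct page *p = (struct page *)pos;  /* lru is 1st member */
                printf("page %d (%s)\n", p->id, p->is_cma ? "cma" : "normal");
        }
        return 0;
}

With the marker kept at the first CMA page, lru_normal_empty() only needs to test lists[lru].next == cma_list[lru] to see whether any normal pages remain.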
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 8801000..dde2183 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -345,13 +345,13 @@ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
int migratetype)
{
__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
-#ifndef CONFIG_AMLOGIC_MODIFY
+#ifndef CONFIG_AMLOGIC_MEMORY_EXTEND
if (is_migrate_cma(migratetype))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
-#endif /* !CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
/* statistics free pages according migrate type */
static inline void __mod_zone_migrate_state(struct zone *zone, int nr_pages,
int migratetype)
@@ -362,7 +362,7 @@ static inline void __mod_zone_migrate_state(struct zone *zone, int nr_pages,
}
zone_page_state_add(nr_pages, zone, NR_FREE_UNMOVABLE + migratetype);
}
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
extern const char * const vmstat_text[];
diff --git a/mm/cma.c b/mm/cma.c
index a51103a..2ac28bb6 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -36,37 +36,15 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>
-#ifdef CONFIG_AMLOGIC_MODIFY
-#include <linux/amlogic/page_trace.h>
-#endif
+#ifdef CONFIG_AMLOGIC_CMA
+#include <linux/amlogic/aml_cma.h>
+#endif /* CONFIG_AMLOGIC_CMA */
#include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
-#ifdef CONFIG_AMLOGIC_MODIFY
-/* how many cma pages used by driver */
-static atomic_long_t driver_alloc_cma;
-static atomic_t cma_alloc_ref = ATOMIC_INIT(0);
-unsigned long get_driver_alloc_cma(void)
-{
- return atomic_long_read(&driver_alloc_cma);
-}
-
-static void update_cma_page_trace(struct page *page, unsigned long cnt)
-{
- long i;
-
- if (page == NULL)
- return;
-
- for (i = 0; i < cnt; i++) {
- set_page_trace(page, 0, __GFP_BDEV);
- page++;
- }
-}
-#endif /* CONFIG_AMLOGIC_MODIFY */
phys_addr_t cma_get_base(const struct cma *cma)
{
@@ -177,9 +155,6 @@ static int __init cma_init_reserved_areas(void)
if (ret)
return ret;
}
-#ifdef CONFIG_AMLOGIC_MODIFY
- atomic_long_set(&driver_alloc_cma, 0);
-#endif /* CONFIG_AMLOGIC_MODIFY */
return 0;
}
@@ -398,6 +373,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
int ret;
+#ifdef CONFIG_AMLOGIC_CMA
+ int dummy;
+#endif /* CONFIG_AMLOGIC_CMA */
if (!cma || !cma->count)
return NULL;
@@ -416,6 +394,10 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
if (bitmap_count > bitmap_maxno)
return NULL;
+#ifdef CONFIG_AMLOGIC_CMA
+ aml_cma_alloc_pre_hook(&dummy, count);
+#endif /* CONFIG_AMLOGIC_CMA */
+
for (;;) {
mutex_lock(&cma->lock);
bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
@@ -423,11 +405,6 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
offset);
if (bitmap_no >= bitmap_maxno) {
mutex_unlock(&cma->lock);
- #ifdef CONFIG_AMLOGIC_MODIFY
- /* for debug */
- pr_err("can't find zero bit map for %lx, cnt:%ld\n",
- cma->base_pfn, count);
- #endif
break;
}
bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
@@ -440,7 +417,11 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
mutex_lock(&cma_mutex);
+ #ifdef CONFIG_AMLOGIC_CMA
+ ret = aml_cma_alloc_range(pfn, pfn + count);
+ #else
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ #endif /* CONFIG_AMLOGIC_CMA */
mutex_unlock(&cma_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
@@ -453,20 +434,17 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
- #ifndef CONFIG_AMLOGIC_MODIFY
+ #ifndef CONFIG_AMLOGIC_CMA
/* try again with a bit different memory target */
start = bitmap_no + mask + 1;
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
}
trace_cma_alloc(pfn, page, count, align);
-#ifdef CONFIG_AMLOGIC_MODIFY
- if (page) {
- atomic_long_add(count, &driver_alloc_cma);
- update_cma_page_trace(page, count);
- }
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#ifdef CONFIG_AMLOGIC_CMA
+ aml_cma_alloc_post_hook(&dummy, count, page);
+#endif /* CONFIG_AMLOGIC_CMA */
pr_debug("%s(): returned %p\n", __func__, page);
return page;
}
@@ -497,29 +475,15 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+#ifdef CONFIG_AMLOGIC_CMA
+ aml_cma_release_hook(count, (struct page *)pages);
+ aml_cma_free(pfn, count);
+#else
free_contig_range(pfn, count);
+#endif /* CONFIG_AMLOGIC_CMA */
cma_clear_bitmap(cma, pfn, count);
trace_cma_release(pfn, pages, count);
-#ifdef CONFIG_AMLOGIC_MODIFY
- atomic_long_sub(count, &driver_alloc_cma);
-#endif /* CONFIG_AMLOGIC_MODIFY */
- return true;
-}
-
-#ifdef CONFIG_AMLOGIC_MODIFY
-bool cma_suitable(gfp_t gfp_mask)
-{
- if (gfp_mask & (__GFP_RECLAIMABLE | __GFP_BDEV | __GFP_WRITE))
- return false;
- if (!(gfp_mask & __GFP_MOVABLE))
- return false;
-
- /* try to reduce page lock wait for read */
- if (atomic_read(&cma_alloc_ref))
- return false;
-
return true;
}
-#endif /* CONFIG_AMLOGIC_MODIFY */
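
With the Amlogic hooks in place, cma_alloc() above still follows the stock shape: scan the area bitmap for a free window, mark it, hand the PFN range to the range allocator, and roll the bitmap back and retry when the range turns out to be busy, except that the retry no longer advances the search start. A condensed user-space model of that loop; claim_range() is only a stand-in for aml_cma_alloc_range()/alloc_contig_range() and fails once on purpose to exercise the retry path:

#include <stdio.h>
#include <stdbool.h>

#define NBITS 64

static bool bitmap[NBITS];
static int attempts;

static long find_zero_area(long start, long count)
{
    for (long i = start; i + count <= NBITS; i++) {
        long j;

        for (j = 0; j < count && !bitmap[i + j]; j++)
            ;
        if (j == count)
            return i;
    }
    return -1;
}

static void bitmap_fill_range(long start, long count, bool val)
{
    for (long i = 0; i < count; i++)
        bitmap[start + i] = val;
}

/* stand-in for aml_cma_alloc_range()/alloc_contig_range() */
static int claim_range(long pfn, long count)
{
    (void)pfn;
    (void)count;
    return ++attempts == 1 ? -16 : 0;   /* busy (-EBUSY) once, then ok */
}

static long toy_cma_alloc(long count)
{
    long start = 0;
    long bit;

    /* aml_cma_alloc_pre_hook() would run before the scan */
    for (;;) {
        bit = find_zero_area(start, count);
        if (bit < 0)
            break;                              /* no room at all    */
        bitmap_fill_range(bit, count, true);    /* reserve in bitmap */
        if (claim_range(bit, count) == 0)
            break;                              /* pages are ours    */
        bitmap_fill_range(bit, count, false);   /* busy: roll back   */
        /* under CONFIG_AMLOGIC_CMA the same window is retried, so
         * "start" is deliberately not advanced here */
    }
    /* aml_cma_alloc_post_hook() would run here in the patched kernel */
    return bit;
}

int main(void)
{
    bitmap_fill_range(0, 8, true);              /* already allocated */

    long bit = toy_cma_alloc(16);

    printf("got 16 pages at bit %ld after %d attempts\n", bit, attempts);
    return 0;
}
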
diff --git a/mm/compaction.c b/mm/compaction.c
index 3f10918..b24f499 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -814,9 +814,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* so avoid taking lru_lock and isolating it unnecessarily in an
* admittedly racy check.
*/
+ #ifndef CONFIG_AMLOGIC_CMA
if (!page_mapping(page) &&
page_count(page) > page_mapcount(page))
goto isolate_fail;
+ #endif /* !CONFIG_AMLOGIC_CMA */
/* If we already hold the lock, we can skip some rechecking */
if (!locked) {
@@ -839,6 +841,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
goto isolate_fail;
}
}
+ #ifdef CONFIG_AMLOGIC_CMA /* under protect of lock */
+ if (!page_mapping(page) &&
+ page_count(page) > page_mapcount(page))
+ goto isolate_fail;
+ #endif /* CONFIG_AMLOGIC_CMA */
lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
@@ -868,13 +875,13 @@ isolate_success:
cc->last_migrated_pfn = low_pfn;
/* Avoid isolating too much */
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_CMA
/* for cma, try to isolate more pages each time */
- if (cc->reason != MR_CMA &&
+ if (cc->page_type != COMPACT_CMA &&
cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
#else
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
++low_pfn;
break;
}
@@ -1035,6 +1042,9 @@ static void isolate_freepages(struct compact_control *cc)
unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
struct list_head *freelist = &cc->freepages;
+#ifdef CONFIG_AMLOGIC_CMA
+ int migrate_type;
+#endif /* CONFIG_AMLOGIC_CMA */
/*
* Initialise the free scanner. The starting point is where we last
@@ -1084,6 +1094,13 @@ static void isolate_freepages(struct compact_control *cc)
if (!isolation_suitable(cc, page))
continue;
+ #ifdef CONFIG_AMLOGIC_CMA
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_isolate(migrate_type))
+ continue;
+ if (is_migrate_cma(migrate_type) && cma_alloc_ref())
+ continue;
+ #endif /* CONFIG_AMLOGIC_CMA */
/* Found a block suitable for isolating free pages from. */
isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
freelist, false);
@@ -1134,6 +1151,16 @@ static struct page *compaction_alloc(struct page *migratepage,
{
struct compact_control *cc = (struct compact_control *)data;
struct page *freepage;
+#ifdef CONFIG_AMLOGIC_CMA
+ struct address_space *mapping;
+
+ mapping = page_mapping(migratepage);
+ if ((unsigned long)mapping & PAGE_MAPPING_ANON)
+ mapping = NULL;
+
+ if (mapping && !can_use_cma(mapping_gfp_mask(mapping)))
+ return alloc_page(mapping_gfp_mask(mapping) | __GFP_BDEV);
+#endif
/*
* Isolate free pages if necessary, and if we are not aborting due to
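
compaction_alloc() above now refuses to hand a page from the isolated freelist (which may be a CMA page) to a file-backed source page whose mapping gfp mask is not CMA-eligible, and falls back to alloc_page() with __GFP_BDEV instead. The gate is can_use_cma(); its body lives in drivers/amlogic/memory_ext and is not part of this hunk, so the sketch below assumes it keeps the rules of the cma_suitable() helper removed from mm/cma.c earlier in this diff: movable only, never for reclaimable, __GFP_BDEV or about-to-be-written pages, and never while a CMA allocation is in flight. The flag values are placeholders, not the real gfp bits:

#include <stdio.h>
#include <stdbool.h>

#define __GFP_MOVABLE      0x01u
#define __GFP_RECLAIMABLE  0x02u
#define __GFP_WRITE        0x04u
#define __GFP_BDEV         0x08u   /* Amlogic-specific "block device" flag */

static int cma_alloc_ref;          /* nonzero while cma_alloc() is running */

static bool can_use_cma(unsigned int gfp_mask)
{
    if (gfp_mask & (__GFP_RECLAIMABLE | __GFP_BDEV | __GFP_WRITE))
        return false;
    if (!(gfp_mask & __GFP_MOVABLE))
        return false;
    if (cma_alloc_ref)
        return false;
    return true;
}

int main(void)
{
    printf("movable read page:     %d\n", can_use_cma(__GFP_MOVABLE));
    printf("movable write page:    %d\n",
           can_use_cma(__GFP_MOVABLE | __GFP_WRITE));
    printf("block device page:     %d\n",
           can_use_cma(__GFP_MOVABLE | __GFP_BDEV));

    cma_alloc_ref = 1;             /* a driver is allocating CMA right now */
    printf("movable, during alloc: %d\n", can_use_cma(__GFP_MOVABLE));
    return 0;
}
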
diff --git a/mm/internal.h b/mm/internal.h
index 9953613..02f712e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -15,9 +15,6 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>
-#ifdef CONFIG_AMLOGIC_MODIFY
-#include <linux/migrate.h>
-#endif
/*
* The set of flags that only affect watermark checking and reclaim
@@ -161,6 +158,9 @@ extern int user_min_free_kbytes;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+#ifdef CONFIG_AMLOGIC_CMA
+#include <linux/amlogic/aml_cma.h>
+#else
/*
* in mm/compaction.c
*/
@@ -180,9 +180,6 @@ struct compact_control {
unsigned long migrate_pfn; /* isolate_migratepages search base */
unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
enum migrate_mode mode; /* Async or sync migration mode */
-#ifdef CONFIG_AMLOGIC_MODIFY
- enum migrate_reason reason; /* reason for compact */
-#endif /* CONFIG_AMLOGIC_MODIFY */
bool ignore_skip_hint; /* Scan blocks even if marked skip */
bool ignore_block_suitable; /* Scan blocks considered unsuitable */
bool direct_compaction; /* False from kcompactd or /proc/... */
@@ -201,6 +198,7 @@ isolate_freepages_range(struct compact_control *cc,
unsigned long
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
+#endif /* CONFIG_AMLOGIC_CMA */
int find_suitable_fallback(struct free_area *area, unsigned int order,
int migratetype, bool only_stealable, bool *can_steal);
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 5652be8..83da7b2 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -93,6 +93,10 @@ void lruvec_init(struct lruvec *lruvec)
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
+#ifdef CONFIG_AMLOGIC_CMA
+ for_each_lru(lru)
+ lruvec->cma_list[lru] = &lruvec->lists[lru];
+#endif /* CONFIG_AMLOGIC_CMA */
}
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d73db28..f6243f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -64,10 +64,9 @@
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
-#ifdef CONFIG_AMLOGIC_MODIFY
-#include <linux/cma.h>
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
#include <linux/amlogic/page_trace.h>
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -188,14 +187,7 @@ bool pm_suspended_storage(void)
unsigned int pageblock_order __read_mostly;
#endif
-#ifdef CONFIG_AMLOGIC_MODIFY
-/*
- * should return merged order to for quick free
- */
-static int __free_pages_ok(struct page *page, unsigned int order);
-#else
static void __free_pages_ok(struct page *page, unsigned int order);
-#endif
/*
* results with 256, 32 in the lowmem_reserve sysctl:
@@ -804,28 +796,21 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
* -- nyc
*/
-#ifdef CONFIG_AMLOGIC_MODIFY
-static inline int __free_one_page(struct page *page,
- unsigned long pfn,
- struct zone *zone, unsigned int order,
- int migratetype)
-#else
static inline void __free_one_page(struct page *page,
unsigned long pfn,
struct zone *zone, unsigned int order,
int migratetype)
-#endif /* CONFIG_AMLOGIC_MODIFY */
{
unsigned long page_idx;
unsigned long combined_idx;
unsigned long uninitialized_var(buddy_idx);
struct page *buddy;
unsigned int max_order;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
int buddy_mg;
migratetype = get_pageblock_migratetype(page);
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
@@ -854,7 +839,7 @@ continue_merging:
if (page_is_guard(buddy)) {
clear_page_guard(zone, buddy, order, migratetype);
} else {
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
/*
* Kernel have provided some information about it in
* /proc/pagetypeinfo, /proc/buddyinfo. But both of them
@@ -866,7 +851,7 @@ continue_merging:
*/
buddy_mg = get_pcppage_migratetype(buddy);
__mod_zone_migrate_state(zone, -(1 << order), buddy_mg);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
rmv_page_order(buddy);
@@ -928,11 +913,10 @@ done_merging:
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
zone->free_area[order].nr_free++;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
set_pcppage_migratetype(page, migratetype);
__mod_zone_migrate_state(zone, (1 << order), migratetype);
- return order;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
}
/*
@@ -1097,9 +1081,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
kernel_poison_pages(page, 1 << order, 0);
kernel_map_pages(page, 1 << order, 0);
kasan_free_pages(page, order);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
reset_page_trace(page, order);
-#endif
+#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
return true;
}
@@ -1197,23 +1181,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
spin_unlock(&zone->lock);
}
-#ifdef CONFIG_AMLOGIC_MODIFY
-static int free_one_page(struct zone *zone,
- struct page *page, unsigned long pfn,
- unsigned int order,
- int migratetype)
-#else
static void free_one_page(struct zone *zone,
struct page *page, unsigned long pfn,
unsigned int order,
int migratetype)
-#endif /* CONFIG_AMLOGIC_MODIFY */
{
unsigned long nr_scanned;
-#ifdef CONFIG_AMLOGIC_MODIFY
- int free_order;
-#endif /* CONFIG_AMLOGIC_MODIFY */
-
spin_lock(&zone->lock);
nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
if (nr_scanned)
@@ -1223,15 +1196,8 @@ static void free_one_page(struct zone *zone,
is_migrate_isolate(migratetype))) {
migratetype = get_pfnblock_migratetype(page, pfn);
}
-#ifdef CONFIG_AMLOGIC_MODIFY
- free_order = __free_one_page(page, pfn, zone, order, migratetype);
-#else
__free_one_page(page, pfn, zone, order, migratetype);
-#endif /* CONFIG_AMLOGIC_MODIFY */
spin_unlock(&zone->lock);
-#ifdef CONFIG_AMLOGIC_MODIFY
- return free_order;
-#endif /* CONFIG_AMLOGIC_MODIFY */
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1307,40 +1273,20 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
}
}
-#ifdef CONFIG_AMLOGIC_MODIFY
-static int __free_pages_ok(struct page *page, unsigned int order)
-#else
static void __free_pages_ok(struct page *page, unsigned int order)
-#endif /* CONFIG_AMLOGIC_MODIFY */
{
unsigned long flags;
int migratetype;
-#ifdef CONFIG_AMLOGIC_MODIFY
- int free_order;
-#endif /* CONFIG_AMLOGIC_MODIFY */
unsigned long pfn = page_to_pfn(page);
-#ifdef CONFIG_AMLOGIC_MODIFY
- if (!free_pages_prepare(page, order, true))
- return -1;
-#else
if (!free_pages_prepare(page, order, true))
return;
-#endif /* CONFIG_AMLOGIC_MODIFY */
migratetype = get_pfnblock_migratetype(page, pfn);
local_irq_save(flags);
__count_vm_events(PGFREE, 1 << order);
-#ifdef CONFIG_AMLOGIC_MODIFY
- free_order = free_one_page(page_zone(page), page,
- pfn, order, migratetype);
-#else
free_one_page(page_zone(page), page, pfn, order, migratetype);
-#endif /* CONFIG_AMLOGIC_MODIFY */
local_irq_restore(flags);
-#ifdef CONFIG_AMLOGIC_MODIFY
- return free_order;
-#endif /* CONFIG_AMLOGIC_MODIFY */
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -1746,10 +1692,10 @@ static inline void expand(struct zone *zone, struct page *page,
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
set_pcppage_migratetype(&page[size], migratetype);
__mod_zone_migrate_state(zone, (1 << high), migratetype);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
set_page_order(&page[size], high);
}
}
@@ -1904,10 +1850,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
list_del(&page->lru);
rmv_page_order(page);
area->nr_free--;
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
__mod_zone_migrate_state(zone, -(1 << current_order),
migratetype);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
expand(zone, page, order, current_order, area, migratetype);
set_pcppage_migratetype(page, migratetype);
return page;
@@ -1956,9 +1902,9 @@ int move_freepages(struct zone *zone,
struct page *page;
unsigned int order;
int pages_moved = 0;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
int list_type;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
#ifndef CONFIG_HOLES_IN_ZONE
/*
@@ -1988,12 +1934,12 @@ int move_freepages(struct zone *zone,
order = page_order(page);
list_move(&page->lru,
&zone->free_area[order].free_list[migratetype]);
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
list_type = get_pcppage_migratetype(page);
__mod_zone_migrate_state(zone, -(1 << order), list_type);
__mod_zone_migrate_state(zone, (1 << order), migratetype);
set_pcppage_migratetype(page, migratetype);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
page += 1 << order;
pages_moved += 1 << order;
}
@@ -2073,13 +2019,13 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
* pages are moved, we can change migratetype of pageblock and permanently
* use it's pages as requested migratetype in the future.
*/
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
static void steal_suitable_fallback(struct zone *zone, struct page *page,
int start_type, int *list_type)
#else
static void steal_suitable_fallback(struct zone *zone, struct page *page,
int start_type)
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
{
unsigned int current_order = page_order(page);
int pages;
@@ -2091,9 +2037,9 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
}
pages = move_freepages_block(zone, page, start_type);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
*list_type = start_type;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
/* Claim the whole block if over half of it is free */
if (pages >= (1 << (pageblock_order-1)) ||
@@ -2254,9 +2200,9 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
struct page *page;
int fallback_mt;
bool can_steal;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
int list_type;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1;
@@ -2270,7 +2216,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
page = list_first_entry(&area->free_list[fallback_mt],
struct page, lru);
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
/* list_type may change after try_to_steal_freepages */
list_type = fallback_mt;
if (can_steal)
@@ -2279,14 +2225,14 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
#else
if (can_steal)
steal_suitable_fallback(zone, page, start_migratetype);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
/* Remove the page from the freelists */
area->nr_free--;
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
__mod_zone_migrate_state(zone, -(1 << current_order),
list_type);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
list_del(&page->lru);
rmv_page_order(page);
@@ -2314,19 +2260,19 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
*/
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
static struct page *__rmqueue(struct zone *zone, unsigned int order,
int migratetype, gfp_t gfp_flags)
#else
static struct page *__rmqueue(struct zone *zone, unsigned int order,
int migratetype)
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
{
struct page *page;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
/* use CMA first */
- if (migratetype == MIGRATE_MOVABLE && cma_suitable(gfp_flags)) {
+ if (migratetype == MIGRATE_MOVABLE && can_use_cma(gfp_flags)) {
page = __rmqueue_cma_fallback(zone, order);
if (page) {
trace_mm_page_alloc_zone_locked(page, order,
@@ -2334,14 +2280,14 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
return page;
}
}
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
- #ifndef CONFIG_AMLOGIC_MODIFY /* no need to try again */
+ #ifndef CONFIG_AMLOGIC_CMA /* no need to try again */
if (migratetype == MIGRATE_MOVABLE)
page = __rmqueue_cma_fallback(zone, order);
- #endif /* !CONFIG_AMLOGIC_MODIFY */
+ #endif /* !CONFIG_AMLOGIC_CMA */
if (!page)
page = __rmqueue_fallback(zone, order, migratetype);
@@ -2351,7 +2297,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
return page;
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
/*
* get page but not cma
*/
@@ -2370,14 +2316,14 @@ static struct page *rmqueue_no_cma(struct zone *zone, unsigned int order,
spin_unlock(&zone->lock);
return page;
}
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
* Returns the number of new pages which were placed at *list.
*/
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, bool cold, gfp_t flags)
@@ -2385,17 +2331,17 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, bool cold)
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
{
int i, alloced = 0;
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_CMA
struct page *page = __rmqueue(zone, order, migratetype, flags);
#else
struct page *page = __rmqueue(zone, order, migratetype);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
if (unlikely(page == NULL))
break;
@@ -2417,11 +2363,11 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
list_add_tail(&page->lru, list);
list = &page->lru;
alloced++;
- #ifndef CONFIG_AMLOGIC_MODIFY
+ #ifndef CONFIG_AMLOGIC_MEMORY_EXTEND
if (is_migrate_cma(get_pcppage_migratetype(page)))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
-(1 << order));
- #endif /* !CONFIG_AMLOGIC_MODIFY */
+ #endif /* !CONFIG_AMLOGIC_MEMORY_EXTEND */
}
/*
@@ -2639,7 +2585,7 @@ void free_hot_cold_page(struct page *page, bool cold)
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
if (unlikely(is_migrate_isolate(migratetype)) ||
unlikely(is_migrate_cma(migratetype))) {
free_one_page(zone, page, pfn, 0, migratetype);
@@ -2650,7 +2596,7 @@ void free_hot_cold_page(struct page *page, bool cold)
free_one_page(zone, page, pfn, 0, migratetype);
goto out;
}
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
migratetype = MIGRATE_MOVABLE;
}
@@ -2741,10 +2687,10 @@ int __isolate_free_page(struct page *page, unsigned int order)
/* Remove page from free list */
list_del(&page->lru);
zone->free_area[order].nr_free--;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
__mod_zone_migrate_state(zone, -(1 << order),
get_pcppage_migratetype(page));
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
rmv_page_order(page);
/*
@@ -2818,7 +2764,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_CMA
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold,
@@ -2827,7 +2773,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
if (unlikely(list_empty(list)))
goto failed;
}
@@ -2837,7 +2783,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
else
page = list_first_entry(list, struct page, lru);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
/*
* USING CMA FIRST POLICY situations:
* 1. CMA pages may return to pcp and allocated next
@@ -2849,7 +2795,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
* For 2, we should replace with a cma page
* before page is deleted from PCP list.
*/
- if (!cma_suitable(gfp_flags) &&
+ if (!can_use_cma(gfp_flags) &&
is_migrate_cma_page(page)) {
/* case 1 */
page = rmqueue_no_cma(zone, order, migratetype);
@@ -2858,7 +2804,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
goto failed;
} else if ((migratetype == MIGRATE_MOVABLE) &&
(get_pcppage_migratetype(page) != MIGRATE_CMA) &&
- cma_suitable(gfp_flags)) {
+ can_use_cma(gfp_flags)) {
struct page *tmp_page;
spin_lock(&zone->lock);
@@ -2875,7 +2821,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
goto alloc_success;
}
use_pcp:
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
list_del(&page->lru);
pcp->count--;
@@ -2897,12 +2843,12 @@ use_pcp:
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
if (!page)
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_CMA
page = __rmqueue(zone, order,
migratetype, gfp_flags);
#else
page = __rmqueue(zone, order, migratetype);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
} while (page && check_new_pages(page, order));
spin_unlock(&zone->lock);
if (!page)
@@ -2911,9 +2857,9 @@ use_pcp:
get_pcppage_migratetype(page));
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
alloc_success:
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
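
The buffered_rmqueue() rework above boils down to a three-way decision on the page at the head of the per-cpu list: if the request cannot use CMA but the head page is a CMA page, swap in a non-CMA page via rmqueue_no_cma(); if the request is movable and CMA-eligible but the head page is not CMA, pull a CMA page with __rmqueue_cma_fallback() instead; otherwise just take the head page. A compact sketch of only that decision, with illustrative stand-ins for the kernel predicates:

#include <stdio.h>
#include <stdbool.h>

enum pcp_action { USE_PCP_PAGE, REPLACE_WITH_NON_CMA, REPLACE_WITH_CMA };

static enum pcp_action pick(bool head_is_cma, bool movable, bool cma_ok)
{
    if (!cma_ok && head_is_cma)
        return REPLACE_WITH_NON_CMA;   /* rmqueue_no_cma() path        */
    if (cma_ok && movable && !head_is_cma)
        return REPLACE_WITH_CMA;       /* __rmqueue_cma_fallback() path */
    return USE_PCP_PAGE;
}

static const char *name[] = {
    "take pcp head", "swap in non-CMA page", "swap in CMA page"
};

int main(void)
{
    /* unmovable request finds a CMA page on the pcp list */
    printf("unmovable vs CMA head:        %s\n", name[pick(true, false, false)]);
    /* movable, CMA-eligible request finds a normal page there */
    printf("movable vs normal head:       %s\n", name[pick(false, true, true)]);
    /* movable request while CMA is forbidden (e.g. __GFP_WRITE) */
    printf("movable write vs normal head: %s\n", name[pick(false, true, false)]);
    return 0;
}
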
@@ -4009,7 +3955,7 @@ got_pg:
return page;
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
struct alloc_context *ac)
{
@@ -4023,10 +3969,10 @@ static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
ac->nodemask) {
free_pages = zone_page_state(zone, NR_FREE_PAGES);
- #ifdef CONFIG_CMA
- if (cma_suitable(gfp_mask))
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (can_use_cma(gfp_mask))
free_cma = zone_page_state(zone, NR_FREE_CMA_PAGES);
- #endif
+ #endif /* CONFIG_AMLOGIC_CMA */
free_pages -= free_cma;
/*
* wake up kswapd before get pages from buddy, this help to
@@ -4037,7 +3983,7 @@ static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
wakeup_kswapd(zone, order, ac->high_zoneidx);
}
}
-#endif
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
/*
* This is the 'heart' of the zoned buddy allocator.
@@ -4102,9 +4048,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
*/
goto no_zone;
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
should_wakeup_kswap(gfp_mask, order, &ac);
-#endif
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
/* First allocation attempt */
page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
@@ -4139,9 +4085,9 @@ out:
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
set_page_trace(page, order, gfp_mask);
-#endif
+#endif /* CONFIG_AMLOGIC_PAGE_TRACE */
return page;
}
@@ -4185,26 +4131,6 @@ void __free_pages(struct page *page, unsigned int order)
EXPORT_SYMBOL(__free_pages);
-#ifdef CONFIG_AMLOGIC_MODIFY
-int __free_pages_cma(struct page *page, unsigned int order, unsigned int *cnt)
-{
- int i;
- int ref = 0;
-
- /* clear ref count first */
- for (i = 0; i < (1 << order); i++) {
- if (!put_page_testzero(page + i))
- ref++;
- }
- if (ref) {
- pr_info("%s, %d pages are still in use\n", __func__, ref);
- *cnt += ref;
- return -1;
- }
- return __free_pages_ok(page, order);
-}
-#endif /* CONFIG_AMLOGIC_MODIFY */
-
void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0) {
@@ -4615,9 +4541,9 @@ void show_free_areas(unsigned int filter)
" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_CMA
" [cma] driver:%lu anon:%lu file:%lu isolate:%lu total:%lu\n"
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
" free:%lu free_pcp:%lu free_cma:%lu\n",
global_node_page_state(NR_ACTIVE_ANON),
global_node_page_state(NR_INACTIVE_ANON),
@@ -4635,15 +4561,15 @@ void show_free_areas(unsigned int filter)
global_node_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE),
- #ifdef CONFIG_AMLOGIC_MODIFY
- get_driver_alloc_cma(),
+ #ifdef CONFIG_AMLOGIC_CMA
+ get_cma_allocated(),
global_page_state(NR_INACTIVE_ANON_CMA) +
global_page_state(NR_ACTIVE_ANON_CMA),
global_page_state(NR_INACTIVE_FILE_CMA) +
global_page_state(NR_ACTIVE_FILE_CMA),
global_page_state(NR_CMA_ISOLATED),
totalcma_pages,
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_CMA */
global_page_state(NR_FREE_PAGES),
free_pcp,
global_page_state(NR_FREE_CMA_PAGES));
@@ -4728,7 +4654,7 @@ void show_free_areas(unsigned int filter)
" bounce:%lukB"
" free_pcp:%lukB"
" local_pcp:%ukB"
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
" free_unmovable:%lukB"
" free_movable:%lukB"
" free_reclaimable:%lukB"
@@ -4736,7 +4662,7 @@ void show_free_areas(unsigned int filter)
#ifdef CONFIG_MEMORY_ISOLATION
" free_isolate:%lukB"
#endif
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
" free_cma:%lukB"
"\n",
zone->name,
@@ -4760,7 +4686,7 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_BOUNCE)),
K(free_pcp),
K(this_cpu_read(zone->pageset->pcp.count)),
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
K(zone_page_state(zone, NR_FREE_UNMOVABLE)),
K(zone_page_state(zone, NR_FREE_MOVABLE)),
K(zone_page_state(zone, NR_FREE_RECLAIMABLE)),
@@ -4768,7 +4694,7 @@ void show_free_areas(unsigned int filter)
#ifdef CONFIG_MEMORY_ISOLATION
K(zone_page_state(zone, NR_FREE_ISOLATE)),
#endif
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
@@ -7502,10 +7428,6 @@ bool is_pageblock_removable_nolock(struct page *page)
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
-#ifdef CONFIG_AMLOGIC_MODIFY
-#define cma_debug pr_debug
-#endif /* CONFIG_AMLOGIC_MODIFY */
-
static unsigned long pfn_max_align_down(unsigned long pfn)
{
return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
@@ -7533,10 +7455,6 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
while (pfn < end || !list_empty(&cc->migratepages)) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
- #ifdef CONFIG_AMLOGIC_MODIFY /* for debug */
- cma_debug("cma %s %d, ret:%d\n",
- __func__, __LINE__, ret);
- #endif /* CONFIG_AMLOGIC_MODIFY */
break;
}
@@ -7545,19 +7463,11 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
pfn = isolate_migratepages_range(cc, pfn, end);
if (!pfn) {
ret = -EINTR;
- #ifdef CONFIG_AMLOGIC_MODIFY /* for debug */
- cma_debug("cma %s %d, ret:%d\n",
- __func__, __LINE__, ret);
- #endif /* CONFIG_AMLOGIC_MODIFY */
break;
}
tries = 0;
} else if (++tries == 5) {
ret = ret < 0 ? ret : -EBUSY;
- #ifdef CONFIG_AMLOGIC_MODIFY /* for debug */
- cma_debug("cma %s %d, ret:%d\n",
- __func__, __LINE__, ret);
- #endif /* CONFIG_AMLOGIC_MODIFY */
break;
}
@@ -7570,109 +7480,11 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
}
if (ret < 0) {
putback_movable_pages(&cc->migratepages);
- #ifdef CONFIG_AMLOGIC_MODIFY /* for debug */
- cma_debug("cma %s %d, ret:%d\n", __func__, __LINE__, ret);
- #endif /* CONFIG_AMLOGIC_MODIFY */
return ret;
}
return 0;
}
-#ifdef CONFIG_AMLOGIC_MODIFY
-#define BOOST_BUSY (0xffff)
-struct cma_pcp_work {
- unsigned long pfn;
- unsigned long count;
- void *data;
- struct work_struct work;
-};
-
-static DEFINE_PER_CPU(struct cma_pcp_work, cma_all_work);
-
-static void cma_boost_work_func(struct work_struct *work)
-{
- struct cma_pcp_work *c_work;
- unsigned long pfn, end;
- int ret = -1;
- atomic_t *ok;
- int this_cpu = smp_processor_id();
- struct compact_control cc = {
- .nr_migratepages = 0,
- .order = -1,
- .mode = MIGRATE_SYNC,
- .reason = MR_CMA,
- .ignore_skip_hint = true,
- };
- INIT_LIST_HEAD(&cc.migratepages);
-
- c_work = container_of(work, struct cma_pcp_work, work);
- pfn = c_work->pfn;
- cc.zone = page_zone(pfn_to_page(pfn));
- end = pfn + c_work->count;
- ret = __alloc_contig_migrate_range(&cc, pfn, end);
- ok = (atomic_t *)c_work->data;
- if (!ret) {
- atomic_inc(ok);
- lru_add_drain();
- drain_pages(this_cpu);
- } else if (ret == -EBUSY)
- atomic_add(BOOST_BUSY, ok); /* tell caller busy */
-
- if (ret) {
- cma_debug("%s, failed, ret:%d, ok:%d\n",
- __func__, ret, atomic_read(ok));
- }
-}
-
-int alloc_contig_boost(unsigned long start_pfn, unsigned long count)
-{
- static struct cpumask has_work;
- int cpu, cpus, i = 0, ret;
- atomic_t ok;
- unsigned long delta;
- unsigned long cnt;
- struct cma_pcp_work *work;
-
- cpumask_clear(&has_work);
-
- cpus = num_online_cpus();
- cnt = count;
- delta = count / cpus;
- atomic_set(&ok, 0);
- for_each_online_cpu(cpu) {
- work = &per_cpu(cma_all_work, cpu);
- work->data = &ok;
- work->pfn = start_pfn + i * delta;
- work->count = delta;
- if (i == cpus - 1)
- work->count = count - i * delta;
- INIT_WORK(&work->work, cma_boost_work_func);
- schedule_work_on(cpu, &work->work);
- cpumask_set_cpu(cpu, &has_work);
- i++;
- }
-
- for_each_cpu(cpu, &has_work) {
- work = &per_cpu(cma_all_work, cpu);
- flush_work(&work->work);
- }
-
- if (atomic_read(&ok) == cpus)
- ret = 0;
- else if (atomic_read(&ok) >= BOOST_BUSY)
- ret = -EBUSY;
- else
- ret = -EINVAL;
-
- if (ret) {
- cma_debug("%s, failed, ret:%d, ok:%d\n",
- __func__, ret, atomic_read(&ok));
- }
-
- return ret;
-}
-#endif /* CONFIG_AMLOGIC_MODIFY */
-
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -7699,9 +7511,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
unsigned long outer_start, outer_end;
unsigned int order;
int ret = 0;
-#ifdef CONFIG_AMLOGIC_MODIFY
- int cpus = 0;
-#endif
struct compact_control cc = {
.nr_migratepages = 0,
@@ -7709,9 +7518,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
.zone = page_zone(pfn_to_page(start)),
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
- #ifdef CONFIG_AMLOGIC_MODIFY
- .reason = MR_CMA,
- #endif /* CONFIG_AMLOGIC_MODIFY */
};
INIT_LIST_HEAD(&cc.migratepages);
@@ -7738,39 +7544,13 @@ int alloc_contig_range(unsigned long start, unsigned long end,
* aligned range but not in the unaligned, original range are
* put back to page allocator so that buddy can use them.
*/
+
ret = start_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype,
false);
-#ifdef CONFIG_AMLOGIC_MODIFY
- if (ret) { /* for debug */
- cma_debug("cma %s %d, ret:%d\n", __func__, __LINE__, ret);
- return ret;
- }
-#else
if (ret)
return ret;
-#endif /* CONFIG_AMLOGIC_MODIFY */
-#ifdef CONFIG_AMLOGIC_MODIFY
- /*
- * try to use more cpu to do this job when alloc count is large
- */
- if ((num_online_cpus() > 1) &&
- ((end - start) >= pageblock_nr_pages / 2)) {
- get_online_cpus();
- ret = alloc_contig_boost(start, end - start);
- put_online_cpus();
- cpus = !ret ? 1 : 0;
- } else
- ret = __alloc_contig_migrate_range(&cc, start, end);
-
- if (ret && ret != -EBUSY)
- goto done;
- if (!cpus) {
- lru_add_drain_all();
- drain_all_pages(cc.zone);
- }
-#else
/*
* In case of -EBUSY, we'd like to know which page causes problem.
* So, just fall through. We will check it in test_pages_isolated().
@@ -7798,7 +7578,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
lru_add_drain_all();
drain_all_pages(cc.zone);
-#endif /* CONFIG_AMLOGIC_MODIFY */
order = 0;
outer_start = start;
@@ -7825,10 +7604,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) {
- #ifdef CONFIG_AMLOGIC_MODIFY
- cma_debug("%s: [%lx, %lx) PFNs busy\n",
+ pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
- #endif /* CONFIG_AMLOGIC_MODIFY */
ret = -EBUSY;
goto done;
}
@@ -7837,9 +7614,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
outer_end = isolate_freepages_range(&cc, outer_start, end);
if (!outer_end) {
ret = -EBUSY;
- #ifdef CONFIG_AMLOGIC_MODIFY
- cma_debug("cma %s %d, ret:%d\n", __func__, __LINE__, ret);
- #endif /* CONFIG_AMLOGIC_MODIFY */
goto done;
}
@@ -7858,49 +7632,13 @@ done:
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
unsigned int count = 0;
-#ifdef CONFIG_AMLOGIC_MODIFY
- struct page *page;
- int free_order, start_order = 0;
- int batch;
-#endif /* CONFIG_AMLOGIC_MODIFY */
-#ifdef CONFIG_AMLOGIC_MODIFY
- while (nr_pages) {
- page = pfn_to_page(pfn);
- batch = (1 << start_order);
- free_order = __free_pages_cma(page, start_order, &count);
- cma_debug("pages:%4d, free:%2d, start:%2d, batch:%4d, pfn:%ld\n",
- nr_pages, free_order,
- start_order, batch, pfn);
- nr_pages -= batch;
- pfn += batch;
- /*
- * since pages are contigunous, and it's buddy already has large
- * order, we can try to free same oder as free_order to get more
- * quickly free speed.
- */
- if (free_order < 0) {
- start_order = 0;
- continue;
- }
- if (nr_pages >= (1 << free_order)) {
- start_order = free_order;
- } else {
- /* remain pages is not enough */
- start_order = 0;
- while (nr_pages >= (1 << start_order))
- start_order++;
- start_order--;
- }
- }
-#else
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
count += page_count(page) != 1;
__free_page(page);
}
-#endif /* CONFIG_AMLOGIC_MODIFY */
WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif
@@ -7988,10 +7726,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
list_del(&page->lru);
rmv_page_order(page);
zone->free_area[order].nr_free--;
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
__mod_zone_migrate_state(zone, -(1 << order),
get_pcppage_migratetype(page));
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
for (i = 0; i < (1 << order); i++)
SetPageReserved((page+i));
pfn += (1 << order);
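
The alloc_contig_boost() machinery deleted above split one contiguous request evenly across the online CPUs, with the last worker absorbing the remainder; the aml_cma_* hooks wired into mm/cma.c earlier in this diff presumably take over that job. A stand-alone rendering of just the split arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
    unsigned long start_pfn = 0x80000;
    unsigned long count = 2050;      /* pages requested, not a CPU multiple */
    int cpus = 4;                    /* num_online_cpus() stand-in          */
    unsigned long delta = count / cpus;

    for (int i = 0; i < cpus; i++) {
        unsigned long pfn = start_pfn + i * delta;
        unsigned long n = (i == cpus - 1) ? count - i * delta : delta;

        printf("cpu%d: pfn %#lx..%#lx (%lu pages)\n", i, pfn, pfn + n, n);
    }
    return 0;
}
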
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index a82c364..1187dea 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -164,11 +164,11 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
{
unsigned long pfn;
unsigned long undo_pfn;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
struct page *page = NULL; /* avoid compile error */
#else
struct page *page;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -183,11 +183,11 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
goto undo;
}
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
if (migratetype == MIGRATE_CMA && page)
mod_zone_page_state(page_zone(page), NR_CMA_ISOLATED,
end_pfn - start_pfn);
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
return 0;
undo:
for (pfn = start_pfn;
@@ -205,7 +205,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype)
{
unsigned long pfn;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
struct page *page = NULL; /* avoid compile error */
#else
struct page *page;
@@ -222,11 +222,11 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
continue;
unset_migratetype_isolate(page, migratetype);
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
if (migratetype == MIGRATE_CMA && page)
mod_zone_page_state(page_zone(page), NR_CMA_ISOLATED,
start_pfn - end_pfn);
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
return 0;
}
/*
@@ -258,17 +258,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
else if (skip_hwpoisoned_pages && PageHWPoison(page))
/* A HWPoisoned page cannot be also PageBuddy */
pfn++;
- #ifdef CONFIG_AMLOGIC_MODIFY
- else { /* for debug */
- pr_debug("%s, pfn:%lx, flag:%lx, map_cnt:%d\n",
- __func__, pfn, page->flags,
- atomic_read(&page->_mapcount));
- break;
- }
- #else
else
break;
- #endif /* CONFIG_AMLOGIC_MODIFY */
}
return pfn;
@@ -293,16 +284,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
break;
}
page = __first_valid_page(start_pfn, end_pfn - start_pfn);
-#ifdef CONFIG_AMLOGIC_MODIFY
- if ((pfn < end_pfn) || !page) { /* for debug */
- pr_err("%s, pfn:%lx, endpfn:%lx, page:%p\n",
- __func__, pfn, end_pfn, page);
- return -EBUSY;
- }
-#else
if ((pfn < end_pfn) || !page)
return -EBUSY;
-#endif /* CONFIG_AMLOGIC_MODIFY */
/* Check all pages are free or marked as ISOLATED */
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
@@ -320,16 +303,6 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
{
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
-#ifdef CONFIG_AMLOGIC_MODIFY
- /*
- * currently this function is only used for CMA migrate, so do not
- * allcate memory from cma freelist again
- * TODO:
- * if this flag is set and migrate can't allocate memory from other
- * freelist, try to allocate from another cma pool
- */
- gfp_mask |= __GFP_BDEV;
-#endif
/*
* TODO: allocate a destination hugepage from a nearest neighbor node,
* accordance with memory policy of the user process if possible. For
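
The NR_CMA_ISOLATED updates above come as a matched pair: start_isolate_page_range() credits the counter with end_pfn - start_pfn once a MIGRATE_CMA range is isolated, and undo_isolate_page_range() passes the negated length (start_pfn - end_pfn) to the same helper to take it back. A toy version keeping just that arithmetic:

#include <stdio.h>

static long nr_cma_isolated;

static void mod_zone_page_state(long delta)
{
    nr_cma_isolated += delta;
}

int main(void)
{
    long start_pfn = 0x90000, end_pfn = 0x90400;    /* 1024 pages */

    /* start_isolate_page_range(..., MIGRATE_CMA, ...) succeeded */
    mod_zone_page_state(end_pfn - start_pfn);
    printf("after isolate: %ld\n", nr_cma_isolated);    /* 1024 */

    /* undo_isolate_page_range() after the allocation attempt */
    mod_zone_page_state(start_pfn - end_pfn);
    printf("after undo:    %ld\n", nr_cma_isolated);    /* 0 */
    return 0;
}
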
diff --git a/mm/readahead.c b/mm/readahead.c
index c8a955b..b7ee30b 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -160,6 +160,11 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
loff_t isize = i_size_read(inode);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
+#ifdef CONFIG_AMLOGIC_CMA
+ if (filp->f_mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))
+ gfp_mask |= __GFP_WRITE;
+#endif /* CONFIG_AMLOGIC_CMA */
+
if (isize == 0)
goto out;
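
The readahead tweak above tags pages read ahead for a writable file with __GFP_WRITE, which, given the CMA-eligibility rules noted at the mm/cma.c and mm/compaction.c hunks, presumably keeps soon-to-be-dirtied page cache out of CMA. A small sketch of just the mask adjustment; the mode and gfp bit values are placeholders:

#include <stdio.h>

#define FMODE_WRITE        0x2u
#define FMODE_WRITE_IOCTL  0x100u
#define __GFP_MOVABLE      0x01u
#define __GFP_WRITE        0x04u

static unsigned int toy_readahead_gfp(unsigned int f_mode)
{
    unsigned int gfp_mask = __GFP_MOVABLE;   /* readahead_gfp_mask() stand-in */

    if (f_mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))
        gfp_mask |= __GFP_WRITE;
    return gfp_mask;
}

int main(void)
{
    printf("read-only file: gfp=%#x\n", toy_readahead_gfp(0));
    printf("writable file:  gfp=%#x\n", toy_readahead_gfp(FMODE_WRITE));
    return 0;
}
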
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f0c6bc2..c3a8977 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1122,13 +1122,13 @@ static void print_slabinfo_header(struct seq_file *m)
#else
seq_puts(m, "slabinfo - version: 2.1\n");
#endif
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
/* add total bytes for each slab */
seq_puts(m, "# name <active_objs> <num_objs> ");
seq_puts(m, "<objsize> <objperslab> <pagesperslab> <total bytes>");
#else
seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
@@ -1178,7 +1178,7 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
struct slabinfo sinfo;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
char name[32];
long total;
#endif
@@ -1188,7 +1188,7 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
memcg_accumulate_slabinfo(s, &sinfo);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
strncpy(name, cache_name(s), 31);
total = sinfo.num_objs * s->size;
seq_printf(m, "%-31s %6lu %6lu %6u %4u %4d %8lu",
@@ -1199,7 +1199,7 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
sinfo.objects_per_slab, (1 << sinfo.cache_order));
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
seq_printf(m, " : tunables %4u %4u %4u",
sinfo.limit, sinfo.batchcount, sinfo.shared);
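
The /proc/slabinfo change above widens the name column and appends a <total bytes> column computed as num_objs times the object size. A stand-alone rendering of that row format with made-up cache numbers:

#include <stdio.h>

struct slabinfo {
    const char *name;
    unsigned long active_objs, num_objs;
    unsigned int objsize, objs_per_slab, cache_order;
};

static void cache_show(const struct slabinfo *s)
{
    unsigned long total = s->num_objs * s->objsize;   /* <total bytes> */

    printf("%-31s %6lu %6lu %6u %4u %4d %8lu\n",
           s->name, s->active_objs, s->num_objs, s->objsize,
           s->objs_per_slab, 1 << s->cache_order, total);
}

int main(void)
{
    struct slabinfo ex = {
        .name = "kmalloc-128", .active_objs = 2040, .num_objs = 2048,
        .objsize = 128, .objs_per_slab = 32, .cache_order = 0,
    };

    printf("# name                          <active> <num> <objsize> "
           "<objperslab> <pagesperslab> <total bytes>\n");
    cache_show(&ex);
    return 0;
}
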
diff --git a/mm/swap.c b/mm/swap.c
index 4dcf852..e7f4169 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -210,7 +210,18 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!cma_page(page)) {
+ list_move_tail(&page->lru, lruvec->cma_list[lru]);
+ } else {
+ if ((lruvec->cma_list[lru] == &page->lru) &&
+ (page->lru.next != &lruvec->lists[lru]))
+ lruvec->cma_list[lru] = page->lru.next;
+ list_move_tail(&page->lru, &lruvec->lists[lru]);
+ }
+ #else
list_move_tail(&page->lru, &lruvec->lists[lru]);
+ #endif /* CONFIG_AMLOGIC_CMA */
(*pgmoved)++;
}
}
@@ -549,7 +560,18 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
* The page's writeback ends up during pagevec
* We moves tha page into tail of inactive.
*/
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (!cma_page(page)) {
+ list_move_tail(&page->lru, lruvec->cma_list[lru]);
+ } else {
+ if ((lruvec->cma_list[lru] == &page->lru) &&
+ (page->lru.next != &lruvec->lists[lru]))
+ lruvec->cma_list[lru] = page->lru.next;
+ list_move_tail(&page->lru, &lruvec->lists[lru]);
+ }
+ #else
list_move_tail(&page->lru, &lruvec->lists[lru]);
+ #endif /* CONFIG_AMLOGIC_CMA */
__count_vm_event(PGROTATED);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f2ca309..3a65c54 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1456,16 +1456,30 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long scan, nr_pages;
LIST_HEAD(pages_skipped);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
- int migrate_type = 0;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+ bool use_cma = true, is_cma_page;
+
+ if (cma_forbidden_mask(sc->gfp_mask))
+ use_cma = false;
+#endif /* CONFIG_AMLOGIC_CMA */
for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
!list_empty(src);) {
struct page *page;
+ #ifdef CONFIG_AMLOGIC_CMA
+ page = NULL;
+ if (!use_cma) {
+ if (!lru_normal_empty(lru, lruvec))
+ page = lru_to_page(lruvec->cma_list[lru]);
+ }
+ if (!page)
+ page = lru_to_page(src);
+ is_cma_page = cma_page(page);
+ #else
page = lru_to_page(src);
+ #endif /* CONFIG_AMLOGIC_CMA */
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON_PAGE(!PageLRU(page), page);
@@ -1487,20 +1501,30 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
nr_pages = hpage_nr_pages(page);
nr_taken += nr_pages;
nr_zone_taken[page_zonenum(page)] += nr_pages;
- list_move(&page->lru, dst);
- #ifdef CONFIG_AMLOGIC_MODIFY
- migrate_type = get_pageblock_migratetype(page);
- if (is_migrate_cma(migrate_type) ||
- is_migrate_isolate(migrate_type))
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (is_cma_page) {
__mod_zone_page_state(page_zone(page),
- NR_LRU_BASE + lru + num,
- -nr_pages);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ NR_LRU_BASE + lru + num,
+ -nr_pages);
+ if (lruvec->cma_list[lru] == &page->lru)
+ lruvec->cma_list[lru] = page->lru.next;
+ }
+ #endif /* CONFIG_AMLOGIC_CMA */
+ list_move(&page->lru, dst);
break;
case -EBUSY:
/* else it is being freed elsewhere */
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (is_cma_page) {
+ list_move(&page->lru,
+ lruvec->cma_list[lru]->prev);
+ lruvec->cma_list[lru] = &page->lru;
+ } else
+ list_move(&page->lru, src);
+ #else
list_move(&page->lru, src);
+ #endif /* CONFIG_AMLOGIC_CMA */
continue;
default:
@@ -1603,11 +1627,11 @@ int isolate_lru_page(struct page *page)
static int too_many_isolated(struct pglist_data *pgdat, int file,
struct scan_control *sc)
{
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
signed long inactive, isolated;
#else
unsigned long inactive, isolated;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
if (current_is_kswapd())
return 0;
@@ -1623,9 +1647,9 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
}
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
isolated -= node_page_state(pgdat, NR_CMA_ISOLATED);
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
/*
* GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
* won't get blocked by normal direct-reclaimers, forming a circular
@@ -1634,12 +1658,12 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
inactive >>= 3;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
WARN_ONCE(isolated > inactive,
"isolated:%ld, cma:%ld, inactive:%ld, mask:%x, file:%d\n",
isolated, node_page_state(pgdat, NR_CMA_ISOLATED),
inactive, sc->gfp_mask, file);
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_CMA */
return isolated > inactive;
}
@@ -1908,10 +1932,10 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
unsigned long pgmoved = 0;
struct page *page;
int nr_pages;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_CMA
int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
- int migrate_type = 0;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+ bool is_cma_page;
+#endif /* CONFIG_AMLOGIC_CMA */
while (!list_empty(list)) {
page = lru_to_page(list);
@@ -1922,16 +1946,20 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
nr_pages = hpage_nr_pages(page);
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
- list_move(&page->lru, &lruvec->lists[lru]);
- pgmoved += nr_pages;
- #ifdef CONFIG_AMLOGIC_MODIFY
- migrate_type = get_pageblock_migratetype(page);
- if (is_migrate_cma(migrate_type) ||
- is_migrate_isolate(migrate_type))
+ #ifdef CONFIG_AMLOGIC_CMA
+ is_cma_page = cma_page(page);
+ if (is_cma_page) {
__mod_zone_page_state(page_zone(page),
NR_LRU_BASE + lru + num,
nr_pages);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ list_move(&page->lru, lruvec->cma_list[lru]->prev);
+ lruvec->cma_list[lru] = &page->lru;
+ } else
+ list_move(&page->lru, &lruvec->lists[lru]);
+ #else
+ list_move(&page->lru, &lruvec->lists[lru]);
+ #endif /* CONFIG_AMLOGIC_CMA */
+ pgmoved += nr_pages;
if (put_page_testzero(page)) {
__ClearPageLRU(page);
@@ -2100,10 +2128,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
inactive_ratio = int_sqrt(10 * gb);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
else if (!file && (totalram_pages >> (20 - PAGE_SHIFT)) >= 512)
inactive_ratio = 2;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
else
inactive_ratio = 1;
@@ -2258,10 +2286,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
* This scanning priority is essentially the inverse of IO cost.
*/
anon_prio = swappiness;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
if (get_nr_swap_pages() * 3 < total_swap_pages)
anon_prio >>= 1;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
file_prio = 200 - anon_prio;
/*
@@ -3488,6 +3516,9 @@ static int kswapd(void *p)
pgdat->kswapd_order = alloc_order = reclaim_order = 0;
pgdat->kswapd_classzone_idx = classzone_idx = 0;
+#ifdef CONFIG_AMLOGIC_CMA
+ set_user_nice(current, -5);
+#endif /* CONFIG_AMLOGIC_CMA */
for ( ; ; ) {
bool ret;
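
too_many_isolated() above switches its locals to signed long because NR_CMA_ISOLATED is now subtracted from the node's isolated count: should the CMA-isolated figure momentarily exceed the per-node isolated counters, an unsigned subtraction would wrap and make every direct reclaimer look hopelessly throttled. A small demonstration of the difference, with made-up counts:

#include <stdio.h>

int main(void)
{
    unsigned long u_isolated = 32;
    signed long s_isolated = 32;
    long cma_isolated = 512;     /* a whole CMA pageblock range */
    long inactive = 4096;

    u_isolated -= cma_isolated;
    s_isolated -= cma_isolated;

    printf("unsigned: isolated=%lu  throttle=%d\n",
           u_isolated, u_isolated > (unsigned long)inactive);
    printf("signed:   isolated=%ld  throttle=%d\n",
           s_isolated, s_isolated > inactive);
    return 0;
}
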
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 90738d1..bd26933 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1164,18 +1164,18 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
int order, mtype;
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
unsigned long total;
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
seq_printf(m, "Node %4d, zone %8s, type %12s ",
pgdat->node_id,
zone->name,
migratetype_names[mtype]);
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
total = 0;
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
for (order = 0; order < MAX_ORDER; ++order) {
unsigned long freecount = 0;
struct free_area *area;
@@ -1186,14 +1186,14 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
list_for_each(curr, &area->free_list[mtype])
freecount++;
seq_printf(m, "%6lu ", freecount);
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
total += (freecount << order);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
}
- #ifdef CONFIG_AMLOGIC_MODIFY
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
/* show total size for each migrate type*/
seq_printf(m, " %6lu", total);
- #endif /* CONFIG_AMLOGIC_MODIFY */
+ #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
seq_putc(m, '\n');
}
}
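
The extra column printed above is simply the per-order free counts of one migrate type weighted by block size, that is, total pages = sum over orders of (freecount << order). A worked example with made-up counts:

#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
    unsigned long freecount[MAX_ORDER] = { 13, 7, 4, 2, 1 };   /* rest zero */
    unsigned long total = 0;

    for (int order = 0; order < MAX_ORDER; order++) {
        total += freecount[order] << order;
        printf("%6lu ", freecount[order]);
    }
    printf(" %6lu\n", total);   /* 13 + 14 + 16 + 16 + 16 = 75 pages */
    return 0;
}
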
@@ -1208,9 +1208,9 @@ static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
for (order = 0; order < MAX_ORDER; ++order)
seq_printf(m, "%6d ", order);
-#ifdef CONFIG_AMLOGIC_MODIFY
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
seq_printf(m, "%s", " total");
-#endif /* CONFIG_AMLOGIC_MODIFY */
+#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
seq_putc(m, '\n');
walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);