author    Tao Zeng <tao.zeng@amlogic.com>        2019-06-14 07:06:40 (GMT)
committer Jianxin Pan <jianxin.pan@amlogic.com>  2019-06-20 05:50:16 (GMT)
commit    4a456864cacbba0378f3e4438db0594b19c9b5fe
tree      5f772441ce077b869177445097e469cc3a7d053b
parent    593165ef57328fe353e379870a9bac8c902dff60
mm: fix cma allocation time too long [1/1]
PD#TV-6340

Problem:
When live TV is entered quickly right after booting to home, video can
show up more than 10 seconds after the sound comes out. The main cause
is that CMA allocation takes too long.

Solution:
1. Add a page flag for pages under CMA allocation, and do not raise
   the ref count of such pages when user space touches them again.
2. Restrict shmem/swap-backed pages from being allocated out of CMA.
3. Improve the CMA usage-policy checks in the page-allocation path.
4. Install the right page trace for migrated pages.

Change-Id: Ie6b591213a9eda974c3443ca9b491fa8d00cee50
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
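The moving parts of solution point 1 are spread over several files
below. As orientation, here is a minimal sketch of the intended flag
lifecycle, condensed from the hunks that follow. The Set/Clear/Page-
CmaAllocating helpers are generated by the PAGEFLAG(CmaAllocating, ...)
declaration this patch adds; the wrapper function names are
illustrative only, not symbols from the patch:

/* Sketch under CONFIG_AMLOGIC_CMA; wrapper names are hypothetical. */
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/sched.h>

/* mm/compaction.c: tag every page isolated for a CMA range */
static void tag_isolated_page(struct page *page)
{
	SetPageCmaAllocating(page);
}

/* mm/migrate.c: untag once the page has migrated successfully... */
static void untag_after_migrate(struct page *page, int rc,
				enum migrate_reason reason)
{
	if (reason == MR_CMA && rc == MIGRATEPAGE_SUCCESS)
		ClearPageCmaAllocating(page);
}

/* ...or when it is put back after a failed migrate/reclaim */
static void untag_on_putback(struct page *page)
{
	if (PageCmaAllocating(page))
		ClearPageCmaAllocating(page);
}

/* mm/migrate.c: a user fault on a tagged page must not take a page
 * reference (that is what made CMA migration fail before); it yields
 * one tick and lets the fault be retried instead */
static bool fault_should_back_off(struct page *page)
{
	if (!PageCmaAllocating(page))
		return false;
	schedule_timeout(1);
	return true;
}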
Diffstat
-rw-r--r-- arch/arm/include/asm/system_misc.h   |  4
-rw-r--r-- arch/arm64/include/asm/system_misc.h |  4
-rw-r--r-- arch/arm64/kernel/process.c          |  2
-rw-r--r-- drivers/amlogic/memory_ext/aml_cma.c | 56
-rw-r--r-- include/linux/page-flags.h           | 16
-rw-r--r-- include/trace/events/mmflags.h       |  6
-rw-r--r-- mm/cma.c                             | 13
-rw-r--r-- mm/compaction.c                      | 17
-rw-r--r-- mm/ksm.c                             | 11
-rw-r--r-- mm/memory.c                          | 12
-rw-r--r-- mm/migrate.c                         | 26
-rw-r--r-- mm/page_alloc.c                      | 20
-rw-r--r-- mm/shmem.c                           |  8
-rw-r--r-- mm/swap_state.c                      |  5
-rw-r--r-- mm/vmscan.c                          |  4
15 files changed, 191 insertions, 13 deletions
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 80135b5..c42f381 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -19,10 +19,14 @@ extern void (*arm_pm_idle)(void);
#ifdef CONFIG_AMLOGIC_USER_FAULT
extern void show_all_pfn(struct task_struct *task, struct pt_regs *regs);
+extern void show_vma(struct mm_struct *mm, unsigned long addr);
#else
static inline void show_all_pfn(struct task_struct *task, struct pt_regs *regs)
{
}
+static inline void show_vma(struct mm_struct *mm, unsigned long addr)
+{
+}
#endif /* CONFIG_AMLOGIC_USER_FAULT */
#define UDBG_UNDEFINED (1 << 0)
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 7f9f2eb..118a0f5 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -43,10 +43,14 @@ struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
#ifdef CONFIG_AMLOGIC_USER_FAULT
extern void show_all_pfn(struct task_struct *task, struct pt_regs *regs);
+extern void show_vma(struct mm_struct *mm, unsigned long addr);
#else
static inline void show_all_pfn(struct task_struct *task, struct pt_regs *regs)
{
}
+static inline void show_vma(struct mm_struct *mm, unsigned long addr)
+{
+}
#endif /* CONFIG_AMLOGIC_USER_FAULT */
extern void __show_regs(struct pt_regs *);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 517bec0..c9290ac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -361,7 +361,7 @@ static void show_user_extra_register_data(struct pt_regs *regs, int nbytes)
set_fs(fs);
}
-static void show_vma(struct mm_struct *mm, unsigned long addr)
+void show_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
struct file *file;
diff --git a/drivers/amlogic/memory_ext/aml_cma.c b/drivers/amlogic/memory_ext/aml_cma.c
index 563cfb7..809cf13 100644
--- a/drivers/amlogic/memory_ext/aml_cma.c
+++ b/drivers/amlogic/memory_ext/aml_cma.c
@@ -19,6 +19,7 @@
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/kernel.h>
+#include <linux/rmap.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/completion.h>
@@ -31,6 +32,7 @@
#include <linux/amlogic/aml_cma.h>
#include <linux/hugetlb.h>
#include <linux/proc_fs.h>
+#include <asm/system_misc.h>
#include <trace/events/page_isolation.h>
#ifdef CONFIG_AMLOGIC_PAGE_TRACE
#include <linux/amlogic/page_trace.h>
@@ -228,21 +230,42 @@ static struct page *get_migrate_page(struct page *page, unsigned long private,
int **resultp)
{
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_BDEV;
+ struct page *new = NULL;
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+ struct page_trace *old_trace, *new_trace;
+#endif
/*
* TODO: allocate a destination hugepage from a nearest neighbor node,
* accordance with memory policy of the user process if possible. For
* now as a simple work-around, we use the next node for destination.
*/
- if (PageHuge(page))
- return alloc_huge_page_node(page_hstate(compound_head(page)),
+ if (PageHuge(page)) {
+ new = alloc_huge_page_node(page_hstate(compound_head(page)),
next_node_in(page_to_nid(page),
node_online_map));
+ #ifdef CONFIG_AMLOGIC_PAGE_TRACE
+ if (new) {
+ old_trace = find_page_base(page);
+ new_trace = find_page_base(new);
+ *new_trace = *old_trace;
+ }
+ #endif
+ return new;
+ }
if (PageHighMem(page))
gfp_mask |= __GFP_HIGHMEM;
- return alloc_page(gfp_mask);
+ new = alloc_page(gfp_mask);
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+ if (new) {
+ old_trace = find_page_base(page);
+ new_trace = find_page_base(new);
+ *new_trace = *old_trace;
+ }
+#endif
+ return new;
}
/* [start, end) must belong to a single zone. */
@@ -716,6 +739,31 @@ void aml_cma_free(unsigned long pfn, unsigned int nr_pages)
}
EXPORT_SYMBOL(aml_cma_free);
+static int cma_vma_show(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, void *arg)
+{
+#ifdef CONFIG_AMLOGIC_USER_FAULT
+ struct mm_struct *mm = vma->vm_mm;
+
+ show_vma(mm, addr);
+#endif
+ return SWAP_AGAIN;
+}
+
+void rmap_walk_vma(struct page *page)
+{
+ struct rmap_walk_control rwc = {
+ .rmap_one = cma_vma_show,
+ };
+
+ pr_info("%s, show map for page:%lx,f:%lx, m:%p, p:%d\n",
+ __func__, page_to_pfn(page), page->flags,
+ page->mapping, page_count(page));
+ if (!page_mapping(page))
+ return;
+ rmap_walk(page, &rwc);
+}
+
void show_page(struct page *page)
{
unsigned long trace = 0;
@@ -733,6 +781,8 @@ void show_page(struct page *page)
page->flags & 0xffffffff,
page_mapcount(page), page_count(page),
(void *)trace);
+ if (cma_debug_level > 4)
+ rmap_walk_vma(page);
}
static int cma_debug_show(struct seq_file *m, void *arg)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 74e4dda..feebade 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -105,6 +105,9 @@ enum pageflags {
PG_young,
PG_idle,
#endif
+#ifdef CONFIG_AMLOGIC_CMA
+ PG_cma_allocating, /* page is under CMA allocation */
+#endif
__NR_PAGEFLAGS,
/* Filesystems */
@@ -265,6 +268,9 @@ PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
+#ifdef CONFIG_AMLOGIC_CMA
+PAGEFLAG(CmaAllocating, cma_allocating, PF_ANY)
+#endif
/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
@@ -701,12 +707,22 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* Flags checked when a page is freed. Pages being freed should not have
* these flags set. If they are, there is a problem.
*/
+#ifdef CONFIG_AMLOGIC_CMA
+#define PAGE_FLAGS_CHECK_AT_FREE \
+ (1UL << PG_lru | 1UL << PG_locked | \
+ 1UL << PG_private | 1UL << PG_private_2 | \
+ 1UL << PG_writeback | 1UL << PG_reserved | \
+ 1UL << PG_cma_allocating | \
+ 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \
+ 1UL << PG_unevictable | __PG_MLOCKED)
+#else
#define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \
1UL << PG_unevictable | __PG_MLOCKED)
+#endif
/*
* Flags checked when a page is prepped for return by the page allocator.
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 5a81ab4..dd48b6b 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -78,6 +78,11 @@
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif
+#ifdef CONFIG_AMLOGIC_CMA
+#define PG_CMA_ALLOCATING(flag, string) ,{1UL << flag, string}
+#else
+#define PG_CMA_ALLOCATING(flag, string)
+#endif
#define __def_pageflag_names \
{1UL << PG_locked, "locked" }, \
@@ -100,6 +105,7 @@
{1UL << PG_reclaim, "reclaim" }, \
{1UL << PG_swapbacked, "swapbacked" }, \
{1UL << PG_unevictable, "unevictable" } \
+PG_CMA_ALLOCATING(PG_cma_allocating, "cma_allocating") \
IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
diff --git a/mm/cma.c b/mm/cma.c
index 7cc239a..d903797 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -39,6 +39,7 @@
#ifdef CONFIG_AMLOGIC_CMA
#include <asm/pgtable.h>
#include <linux/amlogic/aml_cma.h>
+#include <linux/delay.h>
#include <linux/amlogic/secmon.h>
#endif /* CONFIG_AMLOGIC_CMA */
@@ -513,6 +514,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
#ifdef CONFIG_AMLOGIC_CMA
int dummy;
unsigned long long tick;
+ unsigned long long in_tick, timeout;
+
+ in_tick = sched_clock();
#endif /* CONFIG_AMLOGIC_CMA */
if (!cma || !cma->count)
@@ -525,6 +529,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
tick = sched_clock();
cma_debug(0, NULL, "(cma %p, count %zu, align %d)\n",
(void *)cma, count, align);
+ in_tick = sched_clock();
+ timeout = 2ULL * 1000000 * (1 + ((count * PAGE_SIZE) >> 20));
#endif
if (!count)
return NULL;
@@ -580,6 +586,13 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
#ifndef CONFIG_AMLOGIC_CMA
/* try again with a bit different memory target */
start = bitmap_no + mask + 1;
+ #else
+ /*
+ * CMA allocation timed out; we may be blocked on some pages.
+ * Relax the CPU and try again later.
+ */
+ if ((sched_clock() - in_tick) >= timeout)
+ usleep_range(1000, 2000);
#endif /* CONFIG_AMLOGIC_CMA */
}
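A note on the budget computed in the hunk above: sched_clock() counts
nanoseconds, so the timeout works out to 2 ms per MiB requested plus a
2 ms floor, and once the retry loop has run past it, each iteration
sleeps 1-2 ms via usleep_range() instead of spinning. A standalone
check of the arithmetic, assuming 4 KiB pages (a hypothetical
userspace snippet, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long page_size = 4096;
	unsigned long long count = 16384;	/* a 64 MiB request */
	unsigned long long timeout =
		2ULL * 1000000 * (1 + ((count * page_size) >> 20));

	/* 2e6 ns * (1 + 64) = 130000000 ns = 130 ms */
	printf("%llu ns = %llu ms\n", timeout, timeout / 1000000);
	return 0;
}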
diff --git a/mm/compaction.c b/mm/compaction.c
index 665760f..372b953 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -21,6 +21,9 @@
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include "internal.h"
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+#include <linux/amlogic/page_trace.h>
+#endif
#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
@@ -972,6 +975,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
isolate_success:
list_add(&page->lru, &cc->migratepages);
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (cc->page_type == COMPACT_CMA)
+ SetPageCmaAllocating(page);
+ #endif
cc->nr_migratepages++;
nr_isolated++;
@@ -1261,6 +1268,9 @@ static struct page *compaction_alloc(struct page *migratepage,
{
struct compact_control *cc = (struct compact_control *)data;
struct page *freepage;
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+ struct page_trace *old_trace, *new_trace;
+#endif
/*
* Isolate free pages if necessary, and if we are not aborting due to
@@ -1277,6 +1287,13 @@ static struct page *compaction_alloc(struct page *migratepage,
freepage = list_entry(cc->freepages.next, struct page, lru);
list_del(&freepage->lru);
cc->nr_freepages--;
+#ifdef CONFIG_AMLOGIC_PAGE_TRACE
+ if (freepage) {
+ old_trace = find_page_base(migratepage);
+ new_trace = find_page_base(freepage);
+ *new_trace = *old_trace;
+ }
+#endif
return freepage;
}
diff --git a/mm/ksm.c b/mm/ksm.c
index 614b2cc..95f902c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1468,6 +1468,17 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
tree_rmap_item =
unstable_tree_search_insert(rmap_item, page, &tree_page);
+#ifdef CONFIG_AMLOGIC_CMA
+ /*
+ * Now the page is inserted into the unstable tree, but do not
+ * let a CMA page become the kpage; it can still be merged with
+ * other pages.
+ */
+ if (cma_page(page)) {
+ if (tree_rmap_item)
+ put_page(tree_page);
+ return;
+ }
+#endif /* CONFIG_AMLOGIC_CMA */
if (tree_rmap_item) {
bool split;
diff --git a/mm/memory.c b/mm/memory.c
index d2db2c4..0c7b6ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2156,12 +2156,24 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
goto oom;
if (is_zero_pfn(pte_pfn(orig_pte))) {
+ #ifdef CONFIG_AMLOGIC_CMA
+ gfp_t tmp_flag = __GFP_MOVABLE | __GFP_BDEV;
+
+ new_page = __alloc_zeroed_user_highpage(tmp_flag, vma,
+ fe->address);
+ #else
new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
+ #endif
if (!new_page)
goto oom;
} else {
+ #ifdef CONFIG_AMLOGIC_CMA
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_BDEV,
+ vma, fe->address);
+ #else
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
fe->address);
+ #endif
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, fe->address, vma);
diff --git a/mm/migrate.c b/mm/migrate.c
index a943ef4..bdee2ba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -164,6 +164,10 @@ void putback_movable_pages(struct list_head *l)
struct page *page2;
list_for_each_entry_safe(page, page2, l, lru) {
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (PageCmaAllocating(page)) /* migrate/reclaim failed */
+ ClearPageCmaAllocating(page);
+ #endif
if (unlikely(PageHuge(page))) {
putback_active_hugepage(page);
continue;
@@ -301,6 +305,9 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
pte_t pte;
swp_entry_t entry;
struct page *page;
+#ifdef CONFIG_AMLOGIC_CMA
+ bool need_wait = 0;
+#endif
spin_lock(ptl);
pte = *ptep;
@@ -312,6 +319,17 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
goto out;
page = migration_entry_to_page(entry);
+#ifdef CONFIG_AMLOGIC_CMA
+ /* This page is under CMA allocation; do not take a ref on it */
+ if (PageCmaAllocating(page)) {
+ pr_debug("%s, Page:%lx, flags:%lx, m:%d, c:%d, map:%p\n",
+ __func__, page_to_pfn(page), page->flags,
+ page_mapcount(page), page_count(page),
+ page->mapping);
+ need_wait = 1;
+ goto out;
+ }
+#endif
/*
* Once radix-tree replacement of page migration started, page_count
@@ -328,6 +346,10 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
return;
out:
pte_unmap_unlock(ptep, ptl);
+#ifdef CONFIG_AMLOGIC_CMA
+ if (need_wait)
+ schedule_timeout(1);
+#endif
}
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
@@ -1196,6 +1218,10 @@ put_new:
else
*result = page_to_nid(newpage);
}
+#ifdef CONFIG_AMLOGIC_CMA
+ if (reason == MR_CMA && rc == MIGRATEPAGE_SUCCESS)
+ ClearPageCmaAllocating(page);
+#endif
return rc;
}
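For contrast with the __migration_entry_wait() hunk above: the stock
function pins the migrating page and sleeps on its page lock, and that
extra reference is precisely what could stall a CMA allocation. A
condensed paraphrase of the two paths in one hypothetical function
(not literal kernel code; locking details simplified):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/sched.h>

static void wait_for_migrating_page(struct page *page, pte_t *ptep,
				    spinlock_t *ptl)
{
	bool need_wait = false;

	if (!PageCmaAllocating(page)) {
		/* stock path: pin the page, sleep until migration
		 * unlocks it */
		if (get_page_unless_zero(page)) {
			pte_unmap_unlock(ptep, ptl);
			wait_on_page_locked(page);
			put_page(page);
			return;
		}
	} else {
		/* patched path: never pin a page being claimed by CMA;
		 * just yield a tick and let the fault be retried */
		need_wait = true;
	}
	pte_unmap_unlock(ptep, ptl);
	if (need_wait)
		schedule_timeout(1);
}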
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7118cf8..b453904 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2283,7 +2283,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
*/
#ifdef CONFIG_AMLOGIC_CMA
static struct page *__rmqueue(struct zone *zone, unsigned int order,
- int migratetype, gfp_t gfp_flags)
+ int migratetype, bool cma)
#else
static struct page *__rmqueue(struct zone *zone, unsigned int order,
int migratetype)
@@ -2293,7 +2293,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
#ifdef CONFIG_AMLOGIC_CMA
/* use CMA first */
- if (migratetype == MIGRATE_MOVABLE && can_use_cma(gfp_flags)) {
+ if (migratetype == MIGRATE_MOVABLE && cma) {
page = __rmqueue_cma_fallback(zone, order);
if (page) {
trace_mm_page_alloc_zone_locked(page, order,
@@ -2347,7 +2347,7 @@ static struct page *rmqueue_no_cma(struct zone *zone, unsigned int order,
#ifdef CONFIG_AMLOGIC_CMA
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
- int migratetype, bool cold, gfp_t flags)
+ int migratetype, bool cold, bool cma)
#else
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
@@ -2359,7 +2359,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
#ifdef CONFIG_AMLOGIC_CMA
- struct page *page = __rmqueue(zone, order, migratetype, flags);
+ struct page *page = __rmqueue(zone, order, migratetype, cma);
#else
struct page *page = __rmqueue(zone, order, migratetype);
#endif /* CONFIG_AMLOGIC_CMA */
@@ -2768,6 +2768,9 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
unsigned long flags;
struct page *page;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
+#ifdef CONFIG_AMLOGIC_CMA
+ bool cma = can_use_cma(gfp_flags);
+#endif
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
@@ -2782,7 +2785,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold,
- gfp_flags);
+ cma);
#else
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
@@ -2809,8 +2812,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
* For 2, we should replace it with a cma page
* before the page is deleted from the PCP list.
*/
- if (!can_use_cma(gfp_flags) &&
- is_migrate_cma_page(page)) {
+ if (!cma && is_migrate_cma_page(page)) {
/* case 1 */
page = rmqueue_no_cma(zone, order, migratetype);
if (page)
@@ -2818,7 +2820,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
goto failed;
} else if ((migratetype == MIGRATE_MOVABLE) &&
(get_pcppage_migratetype(page) != MIGRATE_CMA) &&
- can_use_cma(gfp_flags)) {
+ cma) {
struct page *tmp_page;
spin_lock(&zone->lock);
@@ -2859,7 +2861,7 @@ use_pcp:
if (!page)
#ifdef CONFIG_AMLOGIC_CMA
page = __rmqueue(zone, order,
- migratetype, gfp_flags);
+ migratetype, cma);
#else
page = __rmqueue(zone, order, migratetype);
#endif /* CONFIG_AMLOGIC_CMA */
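The page_alloc.c changes boil down to evaluating can_use_cma(gfp_flags)
once per allocation and threading the resulting bool through
__rmqueue()/rmqueue_bulk(), with the PCP fast path enforcing the
policy in both directions. A self-contained restatement of that
decision (types and names here are stand-ins, not the kernel's):

#include <stdbool.h>

enum pcp_action { USE_PAGE_AS_IS, REPLACE_WITH_NON_CMA, REPLACE_WITH_CMA };

/* cma_allowed = can_use_cma(gfp_flags), computed once per call */
static enum pcp_action pcp_policy(bool cma_allowed, bool page_is_cma,
				  bool movable_request)
{
	if (!cma_allowed && page_is_cma)
		return REPLACE_WITH_NON_CMA;	/* case 1: caller must not get CMA */
	if (cma_allowed && movable_request && !page_is_cma)
		return REPLACE_WITH_CMA;	/* case 2: prefer CMA while allowed */
	return USE_PAGE_AS_IS;
}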
diff --git a/mm/shmem.c b/mm/shmem.c
index 61a39aa..4d82d40 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1373,7 +1373,11 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
struct page *page;
shmem_pseudo_vma_init(&pvma, info, index);
+#ifdef CONFIG_AMLOGIC_CMA
+ page = swapin_readahead(swap, gfp | __GFP_BDEV, &pvma, 0);
+#else
page = swapin_readahead(swap, gfp, &pvma, 0);
+#endif
shmem_pseudo_vma_destroy(&pvma);
return page;
@@ -1417,7 +1421,11 @@ static struct page *shmem_alloc_page(gfp_t gfp,
struct page *page;
shmem_pseudo_vma_init(&pvma, info, index);
+#ifdef CONFIG_AMLOGIC_CMA
+ page = alloc_page_vma(gfp | __GFP_BDEV, &pvma, 0);
+#else
page = alloc_page_vma(gfp, &pvma, 0);
+#endif
shmem_pseudo_vma_destroy(&pvma);
return page;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 35d7e0e..7038dcd 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -318,7 +318,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* Get a new page to read into from swap.
*/
if (!new_page) {
+ #ifdef CONFIG_AMLOGIC_CMA
+ new_page = alloc_page_vma(gfp_mask | __GFP_BDEV,
+ vma, addr);
+ #else
new_page = alloc_page_vma(gfp_mask, vma, addr);
+ #endif
if (!new_page)
break; /* Out of memory */
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1efc1f9..2ea4b00 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1266,6 +1266,10 @@ free_it:
* appear not as the counts should be low
*/
list_add(&page->lru, &free_pages);
+ #ifdef CONFIG_AMLOGIC_CMA
+ if (ttu_flags & TTU_IGNORE_ACCESS)
+ ClearPageCmaAllocating(page);
+ #endif
continue;
cull_mlocked: