summaryrefslogtreecommitdiff
author tao zeng <tao.zeng@amlogic.com> 2017-12-21 10:07:37 (GMT)
committer jie.yuan <jie.yuan@amlogic.com> 2018-03-20 03:06:32 (GMT)
commit ddbc17867a80b8139d1cda909d753f25466da0ca (patch)
tree f1f92d4b23cd8d63c5b4372bed09e225a6a0e853
parent 16cffaaab36de952915a8125d920096f23a4f1e9 (diff)
download common-ddbc17867a80b8139d1cda909d753f25466da0ca.zip
common-ddbc17867a80b8139d1cda909d753f25466da0ca.tar.gz
common-ddbc17867a80b8139d1cda909d753f25466da0ca.tar.bz2
memory: merge modification of tao.zeng [3/9]
mm: fix lowmem issue PD#157252: mm: fix lowmem issue. 1. add statistics for CMA pages; 2. reduce CMA printing and do not protect CMA unless a driver has called for it; 3. fix the CMA usage policy in the allocation path; 4. increase the file scan ratio in kswapd; 5. use NOOP as the default I/O scheduler; 6. change ZRAM allocation flags to retry harder. Change-Id: If7b0363da03da1682efe3996c69bb9d511299209 Signed-off-by: tao zeng <tao.zeng@amlogic.com>
Diffstat
-rw-r--r--arch/arm/configs/meson32_defconfig3
-rw-r--r--arch/arm64/boot/dts/amlogic/gxl_p241_1g.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/gxl_p241_1g_buildroot.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/gxl_p241_v2-1g.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/gxl_p241_v2_1g_buildroot.dts2
-rw-r--r--arch/arm64/configs/meson64_defconfig2
-rw-r--r--arch/arm64/configs/meson64_smarthome_defconfig2
-rw-r--r--drivers/block/zram/zram_drv.c9
-rw-r--r--include/linux/mm_inline.h29
-rw-r--r--mm/cma.c8
-rw-r--r--mm/page_alloc.c44
-rw-r--r--mm/vmscan.c32
12 files changed, 124 insertions(+), 17 deletions(-)
diff --git a/arch/arm/configs/meson32_defconfig b/arch/arm/configs/meson32_defconfig
index 6ebf43d..993a05c 100644
--- a/arch/arm/configs/meson32_defconfig
+++ b/arch/arm/configs/meson32_defconfig
@@ -18,6 +18,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_CMDLINE_PARTITION=y
+CONFIG_DEFAULT_NOOP=y
CONFIG_ARCH_VIRT=y
CONFIG_ARCH_MESON=y
# CONFIG_MACH_MESON6 is not set
@@ -227,7 +228,7 @@ CONFIG_AMLOGIC_M8B_SM=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_CMA_SIZE_MBYTES=8
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
diff --git a/arch/arm64/boot/dts/amlogic/gxl_p241_1g.dts b/arch/arm64/boot/dts/amlogic/gxl_p241_1g.dts
index c4175c7..5dc7a2e2 100644
--- a/arch/arm64/boot/dts/amlogic/gxl_p241_1g.dts
+++ b/arch/arm64/boot/dts/amlogic/gxl_p241_1g.dts
@@ -91,7 +91,7 @@
ion_reserved:linux,ion-dev {
compatible = "shared-dma-pool";
reusable;
- size = <0x0 0x5C00000>;
+ size = <0x0 0x3000000>;
alignment = <0x0 0x400000>;
};
@@ -123,7 +123,7 @@
compatible = "shared-dma-pool";
reusable;
/* ion_codec_mm max can alloc size 80M*/
- size = <0x0 0x13400000>;
+ size = <0x0 0x7800000>;
alignment = <0x0 0x400000>;
linux,contiguous-region;
};
diff --git a/arch/arm64/boot/dts/amlogic/gxl_p241_1g_buildroot.dts b/arch/arm64/boot/dts/amlogic/gxl_p241_1g_buildroot.dts
index 77fe520..6e2ea5f 100644
--- a/arch/arm64/boot/dts/amlogic/gxl_p241_1g_buildroot.dts
+++ b/arch/arm64/boot/dts/amlogic/gxl_p241_1g_buildroot.dts
@@ -121,7 +121,7 @@
codec_mm_cma:linux,codec_mm_cma {
compatible = "shared-dma-pool";
reusable;
- size = <0x0 0xb400000>;
+ size = <0x0 0x7800000>;
alignment = <0x0 0x400000>;
linux,contiguous-region;
};
diff --git a/arch/arm64/boot/dts/amlogic/gxl_p241_v2-1g.dts b/arch/arm64/boot/dts/amlogic/gxl_p241_v2-1g.dts
index 3e10d3e..09efec4 100644
--- a/arch/arm64/boot/dts/amlogic/gxl_p241_v2-1g.dts
+++ b/arch/arm64/boot/dts/amlogic/gxl_p241_v2-1g.dts
@@ -91,7 +91,7 @@
ion_reserved:linux,ion-dev {
compatible = "shared-dma-pool";
reusable;
- size = <0x0 0x5C00000>;
+ size = <0x0 0x3000000>;
alignment = <0x0 0x400000>;
};
@@ -123,7 +123,7 @@
compatible = "shared-dma-pool";
reusable;
/* ion_codec_mm max can alloc size 80M*/
- size = <0x0 0x13400000>;
+ size = <0x0 0x7800000>;
alignment = <0x0 0x400000>;
linux,contiguous-region;
};
diff --git a/arch/arm64/boot/dts/amlogic/gxl_p241_v2_1g_buildroot.dts b/arch/arm64/boot/dts/amlogic/gxl_p241_v2_1g_buildroot.dts
index 1e6f560..302b770 100644
--- a/arch/arm64/boot/dts/amlogic/gxl_p241_v2_1g_buildroot.dts
+++ b/arch/arm64/boot/dts/amlogic/gxl_p241_v2_1g_buildroot.dts
@@ -121,7 +121,7 @@
codec_mm_cma:linux,codec_mm_cma {
compatible = "shared-dma-pool";
reusable;
- size = <0x0 0xb400000>;
+ size = <0x0 0x7800000>;
alignment = <0x0 0x400000>;
linux,contiguous-region;
};
diff --git a/arch/arm64/configs/meson64_defconfig b/arch/arm64/configs/meson64_defconfig
index 1395f8b..0ce1cc5 100644
--- a/arch/arm64/configs/meson64_defconfig
+++ b/arch/arm64/configs/meson64_defconfig
@@ -29,6 +29,7 @@ CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
+CONFIG_DEFAULT_NOOP=y
CONFIG_PCI=y
CONFIG_PCIE_DW_PLAT=y
CONFIG_SCHED_MC=y
@@ -326,6 +327,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=8
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/arm64/configs/meson64_smarthome_defconfig b/arch/arm64/configs/meson64_smarthome_defconfig
index 8762164..2c9189a 100644
--- a/arch/arm64/configs/meson64_smarthome_defconfig
+++ b/arch/arm64/configs/meson64_smarthome_defconfig
@@ -23,6 +23,7 @@ CONFIG_PROFILING=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_DEFAULT_NOOP=y
CONFIG_PCI=y
CONFIG_PCIE_DW_PLAT=y
CONFIG_SCHED_MC=y
@@ -305,6 +306,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=8
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c9914d6..896c688 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -755,9 +755,18 @@ compress_again:
atomic64_inc(&zram->stats.writestall);
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ handle = zs_malloc(meta->mem_pool, clen,
+ GFP_NOIO |
+ __GFP_HIGHMEM |
+ __GFP_NOWARN |
+ __GFP_REPEAT |
+ __GFP_MOVABLE);
+ #else
handle = zs_malloc(meta->mem_pool, clen,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
+ #endif
if (handle)
goto compress_again;
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 41d376e..2b91387 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -3,6 +3,9 @@
#include <linux/huge_mm.h>
#include <linux/swap.h>
+#ifdef CONFIG_AMLOGIC_MODIFY
+#include <linux/page-isolation.h>
+#endif /* CONFIG_AMLOGIC_MODIFY */
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
@@ -46,15 +49,41 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
+#ifdef CONFIG_AMLOGIC_MODIFY
+ int nr_pages = hpage_nr_pages(page);
+ int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
+ int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
+
update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
+
+#ifdef CONFIG_AMLOGIC_MODIFY
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
+ __mod_zone_page_state(page_zone(page),
+ NR_LRU_BASE + lru + num, nr_pages);
+#endif /* CONFIG_AMLOGIC_MODIFY */
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
+#ifdef CONFIG_AMLOGIC_MODIFY
+ int nr_pages = hpage_nr_pages(page);
+ int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
+ int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
+
list_del(&page->lru);
update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+
+#ifdef CONFIG_AMLOGIC_MODIFY
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_cma(migrate_type) || is_migrate_isolate(migrate_type))
+ __mod_zone_page_state(page_zone(page),
+ NR_LRU_BASE + lru + num, -nr_pages);
+#endif /* CONFIG_AMLOGIC_MODIFY */
}
/**
diff --git a/mm/cma.c b/mm/cma.c
index a07c7ba..a51103a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -416,9 +416,6 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
if (bitmap_count > bitmap_maxno)
return NULL;
-#ifdef CONFIG_AMLOGIC_MODIFY
- atomic_inc(&cma_alloc_ref);
-#endif
for (;;) {
mutex_lock(&cma->lock);
bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
@@ -465,13 +462,10 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
trace_cma_alloc(pfn, page, count, align);
#ifdef CONFIG_AMLOGIC_MODIFY
- atomic_dec(&cma_alloc_ref);
if (page) {
atomic_long_add(count, &driver_alloc_cma);
update_cma_page_trace(page, count);
}
- WARN_ONCE(!page, "can't alloc from %lx with size:%ld, ret:%d\n",
- cma->base_pfn, count, ret);
#endif /* CONFIG_AMLOGIC_MODIFY */
pr_debug("%s(): returned %p\n", __func__, page);
return page;
@@ -522,7 +516,7 @@ bool cma_suitable(gfp_t gfp_mask)
return false;
/* try to reduce page lock wait for read */
- if (atomic_read(&cma_alloc_ref) && (gfp_mask & __GFP_COLD))
+ if (atomic_read(&cma_alloc_ref))
return false;
return true;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ecb2cf4..dc0ff7e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2314,14 +2314,19 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
*/
+#ifdef CONFIG_AMLOGIC_MODIFY
+static struct page *__rmqueue(struct zone *zone, unsigned int order,
+ int migratetype, gfp_t gfp_flags)
+#else
static struct page *__rmqueue(struct zone *zone, unsigned int order,
int migratetype)
+#endif /* CONFIG_AMLOGIC_MODIFY */
{
struct page *page;
#ifdef CONFIG_AMLOGIC_MODIFY
/* use CMA first */
- if (migratetype == MIGRATE_MOVABLE) {
+ if (migratetype == MIGRATE_MOVABLE && cma_suitable(gfp_flags)) {
page = __rmqueue_cma_fallback(zone, order);
if (page) {
trace_mm_page_alloc_zone_locked(page, order,
@@ -2372,15 +2377,25 @@ static struct page *rmqueue_no_cma(struct zone *zone, unsigned int order,
* a single hold of the lock, for efficiency. Add them to the supplied list.
* Returns the number of new pages which were placed at *list.
*/
+#ifdef CONFIG_AMLOGIC_MODIFY
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ unsigned long count, struct list_head *list,
+ int migratetype, bool cold, gfp_t flags)
+#else
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, bool cold)
+#endif /* CONFIG_AMLOGIC_MODIFY */
{
int i, alloced = 0;
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ struct page *page = __rmqueue(zone, order, migratetype, flags);
+ #else
struct page *page = __rmqueue(zone, order, migratetype);
+ #endif /* CONFIG_AMLOGIC_MODIFY */
if (unlikely(page == NULL))
break;
@@ -2803,9 +2818,16 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ pcp->count += rmqueue_bulk(zone, 0,
+ pcp->batch, list,
+ migratetype, cold,
+ gfp_flags);
+ #else
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold);
+ #endif /* CONFIG_AMLOGIC_MODIFY */
if (unlikely(list_empty(list)))
goto failed;
}
@@ -2875,7 +2897,12 @@ use_pcp:
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
if (!page)
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ page = __rmqueue(zone, order,
+ migratetype, gfp_flags);
+ #else
page = __rmqueue(zone, order, migratetype);
+ #endif /* CONFIG_AMLOGIC_MODIFY */
} while (page && check_new_pages(page, order));
spin_unlock(&zone->lock);
if (!page)
@@ -3986,7 +4013,7 @@ got_pg:
static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
struct alloc_context *ac)
{
- unsigned long free_pages;
+ unsigned long free_pages, free_cma = 0;
struct zoneref *z = ac->preferred_zoneref;
struct zone *zone;
@@ -3996,6 +4023,11 @@ static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
ac->nodemask) {
free_pages = zone_page_state(zone, NR_FREE_PAGES);
+ #ifdef CONFIG_CMA
+ if (cma_suitable(gfp_mask))
+ free_cma = zone_page_state(zone, NR_FREE_CMA_PAGES);
+ #endif
+ free_pages -= free_cma;
/*
* wake up kswapd before get pages from buddy, this help to
* fast reclaim process and can avoid memory become too low
@@ -4584,7 +4616,7 @@ void show_free_areas(unsigned int filter)
" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
#ifdef CONFIG_AMLOGIC_MODIFY
- " driver_cma:%lu"
+ " [cma] driver:%lu anon:%lu file:%lu isolate:%lu total:%lu\n"
#endif /* CONFIG_AMLOGIC_MODIFY */
" free:%lu free_pcp:%lu free_cma:%lu\n",
global_node_page_state(NR_ACTIVE_ANON),
@@ -4605,6 +4637,12 @@ void show_free_areas(unsigned int filter)
global_page_state(NR_BOUNCE),
#ifdef CONFIG_AMLOGIC_MODIFY
get_driver_alloc_cma(),
+ global_page_state(NR_INACTIVE_ANON_CMA) +
+ global_page_state(NR_ACTIVE_ANON_CMA),
+ global_page_state(NR_INACTIVE_FILE_CMA) +
+ global_page_state(NR_ACTIVE_FILE_CMA),
+ global_page_state(NR_CMA_ISOLATED),
+ totalcma_pages,
#endif /* CONFIG_AMLOGIC_MODIFY */
global_page_state(NR_FREE_PAGES),
free_pcp,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c4ae05f..f2ca309 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1456,6 +1456,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long scan, nr_pages;
LIST_HEAD(pages_skipped);
+#ifdef CONFIG_AMLOGIC_MODIFY
+ int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
+ int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
!list_empty(src);) {
@@ -1484,6 +1488,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
nr_taken += nr_pages;
nr_zone_taken[page_zonenum(page)] += nr_pages;
list_move(&page->lru, dst);
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_cma(migrate_type) ||
+ is_migrate_isolate(migrate_type))
+ __mod_zone_page_state(page_zone(page),
+ NR_LRU_BASE + lru + num,
+ -nr_pages);
+ #endif /* CONFIG_AMLOGIC_MODIFY */
break;
case -EBUSY:
@@ -1896,6 +1908,10 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
unsigned long pgmoved = 0;
struct page *page;
int nr_pages;
+#ifdef CONFIG_AMLOGIC_MODIFY
+ int num = NR_INACTIVE_ANON_CMA - NR_INACTIVE_ANON;
+ int migrate_type = 0;
+#endif /* CONFIG_AMLOGIC_MODIFY */
while (!list_empty(list)) {
page = lru_to_page(list);
@@ -1908,6 +1924,14 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
list_move(&page->lru, &lruvec->lists[lru]);
pgmoved += nr_pages;
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ migrate_type = get_pageblock_migratetype(page);
+ if (is_migrate_cma(migrate_type) ||
+ is_migrate_isolate(migrate_type))
+ __mod_zone_page_state(page_zone(page),
+ NR_LRU_BASE + lru + num,
+ nr_pages);
+ #endif /* CONFIG_AMLOGIC_MODIFY */
if (put_page_testzero(page)) {
__ClearPageLRU(page);
@@ -2076,6 +2100,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
inactive_ratio = int_sqrt(10 * gb);
+#ifdef CONFIG_AMLOGIC_MODIFY
+ else if (!file && (totalram_pages >> (20 - PAGE_SHIFT)) >= 512)
+ inactive_ratio = 2;
+#endif /* CONFIG_AMLOGIC_MODIFY */
else
inactive_ratio = 1;
@@ -2230,6 +2258,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
* This scanning priority is essentially the inverse of IO cost.
*/
anon_prio = swappiness;
+#ifdef CONFIG_AMLOGIC_MODIFY
+ if (get_nr_swap_pages() * 3 < total_swap_pages)
+ anon_prio >>= 1;
+#endif /* CONFIG_AMLOGIC_MODIFY */
file_prio = 200 - anon_prio;
/*