author    Jiamin Ma <jiamin.ma@amlogic.com>    2018-03-12 06:05:57 (GMT)
committer jie.yuan <jie.yuan@amlogic.com>      2018-03-20 03:06:52 (GMT)
commit    7c90f2229dbfa587610ca779dc381c79a033ec22 (patch)
tree      eb1b441c7858843adfd1a38af5d3d62119b24368
parent    71a428b400070ab9ae60088c3775a3038764ee33 (diff)
memory: merge modification of tao.zeng [5/9]
mm: check pfn overflow for low memory platform

PD#158373: mm: check pfn overflow for low memory platform

On ARM64, vmemmap_populate() reserves memory for struct page in steps
that each cover 1GB of RAM, so at least 16MB is reserved per step.
This wastes memory on platforms with less than 1GB of total RAM.

The memory actually needed for struct page is:

    reserve size = sizeof(struct page) * number of pages

With the current 64-byte struct page and 4KB pages on ARM64, the
required reserve sizes are:

    Memory | page count | reserve size (bytes)
    ------------------------------------------
    128MB  |      32768 |      2097152 (2MB)
    256MB  |      65536 |      4194304 (4MB)
    512MB  |     131072 |      8388608 (8MB)

Note: the reservation is aligned to 2MB.

Change-Id: Ia7dbd250cc9aa37698071322f0ebc98bfcfb69db
Signed-off-by: tao zeng <tao.zeng@amlogic.com>
Signed-off-by: Jiamin Ma <jiamin.ma@amlogic.com>
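For illustration, a minimal userspace C sketch that reproduces the table
above; the 64-byte struct page size and 4KB page size are taken from the
commit message, not read from the kernel:

#include <stdio.h>

/* Values assumed from the commit message: 64-byte struct page, 4KB pages. */
#define STRUCT_PAGE_SIZE 64UL
#define PAGE_SIZE_4K     4096UL

int main(void)
{
	unsigned long ram_mb[] = { 128, 256, 512 };
	int i;

	for (i = 0; i < 3; i++) {
		/* number of pages backing this much RAM */
		unsigned long pages = ram_mb[i] * 1024 * 1024 / PAGE_SIZE_4K;
		/* memory needed for their struct page entries */
		unsigned long reserve = pages * STRUCT_PAGE_SIZE;

		printf("%4luMB | %10lu | %9lu (%luMB)\n",
		       ram_mb[i], pages, reserve, reserve >> 20);
	}
	return 0;
}

Running it prints the same three rows as the table, which is far less
than the 16MB reserved per 1GB step before this patch.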
Diffstat
-rw-r--r--  arch/arm64/mm/mmu.c | 37
1 file changed, 37 insertions, 0 deletions
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05615a3..eb8a729 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -30,6 +30,9 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
+#ifdef CONFIG_AMLOGIC_MODIFY
+#include <linux/bootmem.h>
+#endif /* CONFIG_AMLOGIC_MODIFY */
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -537,6 +540,25 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
return vmemmap_populate_basepages(start, end, node);
}
#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+
+#ifdef CONFIG_AMLOGIC_MODIFY
+static int __meminit check_pfn_overflow(unsigned long pfn)
+{
+ unsigned long pfn_up;
+ unsigned long size;
+ /*
+ * If this pfn is at or beyond the PMD-aligned end of the struct page
+ * array sized for max_pfn, no more memory needs to be reserved.
+ * This helps platforms with less than 1GB of RAM.
+ */
+ size = sizeof(struct page);
+ pfn_up = ALIGN(max_pfn * size, PMD_SIZE);
+ pfn_up = (pfn_up + size - 1) / size; /* round up */
+ if (pfn >= pfn_up)
+ return -ERANGE;
+ return 0;
+}
+#endif /* CONFIG_AMLOGIC_MODIFY */
+
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
unsigned long addr = start;
@@ -544,10 +566,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
+#ifdef CONFIG_AMLOGIC_MODIFY
+ struct page *page;
+ page = (struct page *)start;
+#endif /* CONFIG_AMLOGIC_MODIFY */
do {
next = pmd_addr_end(addr, end);
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ /* advance page to next; struct page boundaries may not align exactly with next */
+ while (((unsigned long)page) < next)
+ page++;
+ #endif /* CONFIG_AMLOGIC_MODIFY */
+
pgd = vmemmap_pgd_populate(addr, node);
if (!pgd)
return -ENOMEM;
@@ -567,6 +599,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
+
+ #ifdef CONFIG_AMLOGIC_MODIFY
+ if (check_pfn_overflow(page_to_pfn(page)))
+ break;
+ #endif /* CONFIG_AMLOGIC_MODIFY */
} while (addr = next, addr != end);
return 0;
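As a side note, the cut-off computed by check_pfn_overflow() can be
sanity-checked with a standalone userspace sketch; the PMD_SIZE,
struct page size, and max_pfn values below are assumptions for
illustration, not kernel constants:

#include <stdio.h>

#define PMD_SIZE         (2UL << 20)   /* assumed 2MB, as on 4KB-page ARM64 */
#define STRUCT_PAGE_SIZE 64UL          /* assumed sizeof(struct page) */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

/* Mirror of the patch's cut-off: the first pfn whose struct page falls
 * outside the PMD-aligned vmemmap area sized for max_pfn. */
static unsigned long pfn_cutoff(unsigned long max_pfn)
{
	unsigned long bytes = ALIGN(max_pfn * STRUCT_PAGE_SIZE, PMD_SIZE);

	return (bytes + STRUCT_PAGE_SIZE - 1) / STRUCT_PAGE_SIZE; /* round up */
}

int main(void)
{
	/* 512MB of 4KB pages -> max_pfn = 131072 */
	unsigned long max_pfn = 131072;

	printf("max_pfn=%lu cutoff=%lu\n", max_pfn, pfn_cutoff(max_pfn));
	return 0;
}

For 512MB the cut-off equals max_pfn, so population stops after the 8MB
of struct pages the platform actually needs instead of the full 16MB
that a 1GB step would reserve.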