blob: e04366171cba5370078845a88f6c3f0e70a67fe2
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
8 | #include <linux/vmalloc.h> |
9 | #include <linux/mm.h> |
10 | #include <linux/sched.h> |
11 | #include <linux/io.h> |
12 | #include <linux/export.h> |
13 | #include <asm/cacheflush.h> |
14 | #include <asm/pgtable.h> |
15 | #ifdef CONFIG_AMLOGIC_DEBUG_FTRACE_PSTORE |
16 | #include <linux/moduleparam.h> |
17 | #include <linux/amlogic/debug_ftrace_ramoops.h> |
18 | #endif |
19 | |
20 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP |
21 | static int __read_mostly ioremap_pud_capable; |
22 | static int __read_mostly ioremap_pmd_capable; |
23 | static int __read_mostly ioremap_huge_disabled; |
24 | |
/*
 * Early command-line parameter "nohugeiomap": force ioremap to use
 * page-granular (PTE) mappings even when the architecture could use
 * huge PMD/PUD entries.  Returning 0 tells the early_param machinery
 * the option was consumed successfully.
 */
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
31 | |
32 | void __init ioremap_huge_init(void) |
33 | { |
34 | if (!ioremap_huge_disabled) { |
35 | if (arch_ioremap_pud_supported()) |
36 | ioremap_pud_capable = 1; |
37 | if (arch_ioremap_pmd_supported()) |
38 | ioremap_pmd_capable = 1; |
39 | } |
40 | } |
41 | |
/* Nonzero when huge PUD ioremap mappings may be used (set at boot). */
static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}
46 | |
/* Nonzero when huge PMD ioremap mappings may be used (set at boot). */
static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}
51 | |
52 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ |
/* Without CONFIG_HAVE_ARCH_HUGE_VMAP, huge ioremap mappings are never used. */
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
55 | #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ |
56 | |
/*
 * Map [addr, end) at PTE granularity onto the physical range starting
 * at @phys_addr, one PTE per page with protection @prot.
 *
 * Returns 0 on success, -ENOMEM if a PTE page cannot be allocated.
 */
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		/* ioremap targets unused VA: an existing live PTE is a bug */
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
74 | |
/*
 * Populate the PMD level for [addr, end).  If huge PMD ioremap is
 * enabled and the current chunk is exactly PMD_SIZE with a PMD-aligned
 * physical address, install a single huge PMD; otherwise fall back to
 * per-page PTEs via ioremap_pte_range().
 *
 * Returns 0 on success, -ENOMEM on page-table allocation failure.
 */
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	/* Bias so that (phys_addr + addr) is the PA for any VA in range. */
	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		/*
		 * pmd_free_pte_page() must succeed (freeing any stale PTE
		 * table) before we may overwrite the entry with a huge PMD.
		 */
		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
		    pmd_free_pte_page(pmd, addr)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
101 | |
/*
 * Populate the PUD level for [addr, end).  If huge PUD ioremap is
 * enabled and the current chunk is exactly PUD_SIZE with a PUD-aligned
 * physical address, install a single huge PUD; otherwise descend to
 * ioremap_pmd_range().
 *
 * Returns 0 on success, -ENOMEM on page-table allocation failure.
 */
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	/* Bias so that (phys_addr + addr) is the PA for any VA in range. */
	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		/*
		 * pud_free_pmd_page() must succeed (freeing any stale PMD
		 * table) before we may overwrite the entry with a huge PUD.
		 */
		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
		    pud_free_pmd_page(pud, addr)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
128 | |
129 | #ifdef CONFIG_AMLOGIC_DEBUG_FTRACE_PSTORE |
130 | bool is_normal_memory(pgprot_t p) |
131 | { |
132 | #if defined(CONFIG_ARM) |
133 | return ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC); |
134 | #elif defined(CONFIG_ARM64) |
135 | return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL); |
136 | #else |
137 | #error "Unuspported architecture" |
138 | #endif |
139 | } |
140 | #endif |
141 | |
142 | int ioremap_page_range(unsigned long addr, |
143 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
144 | { |
145 | pgd_t *pgd; |
146 | unsigned long start; |
147 | unsigned long next; |
148 | int err; |
149 | |
150 | BUG_ON(addr >= end); |
151 | |
152 | start = addr; |
153 | phys_addr -= addr; |
154 | pgd = pgd_offset_k(addr); |
155 | do { |
156 | next = pgd_addr_end(addr, end); |
157 | err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot); |
158 | if (err) |
159 | break; |
160 | } while (pgd++, addr = next, addr != end); |
161 | |
162 | flush_cache_vmap(start, end); |
163 | #ifdef CONFIG_AMLOGIC_DEBUG_FTRACE_PSTORE |
164 | if (need_dump_iomap() && !is_normal_memory(prot)) |
165 | pr_err("io__map <va:0x%08lx-0x%08lx> pa:0x%lx,port:0x%lx\n", |
166 | start, end, (unsigned long)phys_addr, |
167 | (unsigned long)pgprot_val(prot)); |
168 | #endif |
169 | return err; |
170 | } |
171 | EXPORT_SYMBOL_GPL(ioremap_page_range); |
172 |