/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page(), page_to_pfn(),
 * virt_to_page() and page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
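
/*
 * For illustration: with a virtual memory map, the pfn/page conversions
 * reduce to constant-offset arithmetic against the map base. A minimal
 * sketch of the CONFIG_SPARSEMEM_VMEMMAP flavour of these primitives
 * (cf. include/asm-generic/memory_model.h), where "vmemmap" is the
 * architecture-defined base address of the map:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * No software page-table walk happens at lookup time; the translation
 * cost is paid once, when vmemmap_populate() instantiates the mapping.
 */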
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid(size, align, goal,
					    BOOTMEM_ALLOC_ACCESSIBLE, node);
}

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, else fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(
				node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		else
			page = alloc_pages(
				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
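
/*
 * Usage sketch (illustrative, x86-64-flavoured and simplified): an
 * architecture that backs the map with huge pages might allocate one
 * PMD_SIZE block per 2MB of map from its vmemmap_populate():
 *
 *	void *p = vmemmap_alloc_block(PMD_SIZE, node);
 *
 *	if (p)
 *		set_pmd(pmd, pfn_pmd(__pa(p) >> PAGE_SHIFT,
 *				     PAGE_KERNEL_LARGE));
 *
 * The block comes back suitably aligned: the buddy allocator returns
 * order-aligned blocks, and the bootmem path above passes "size" as
 * the alignment.
 */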

/*
 * During early boot the buffer below hands out fixed-size chunks, so
 * all callers must request the same size.
 */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the next chunk from the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}
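
/*
 * Why sizes must match (hypothetical example): with only PAGE_SIZE
 * requests the ALIGN() above is a no-op and chunks are carved out back
 * to back. If a PMD_SIZE request followed a PAGE_SIZE one,
 * ALIGN(vmemmap_buf, PMD_SIZE) could skip nearly 2MB of the buffer in
 * one go, wasting most of the early reservation.
 */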

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
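
/*
 * Accounting sketch (made-up numbers): for an altmap with
 * base_pfn = 0x100000, reserve = 64, free = 32768 and nothing yet
 * allocated, vmem_altmap_next_pfn() returns 0x100040 and
 * vmem_altmap_nr_free() returns 32768. Every allocation below advances
 * the next pfn by nr_pfns plus alignment padding and shrinks the free
 * count by the same total.
 */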

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}
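
/*
 * Alignment example (hypothetical values): find_first_bit() picks the
 * lowest set bit, so nr_align starts as the largest power of two that
 * divides nr_pfns; a request for nr_pfns = 24 is aligned to 8 pfns.
 * If the next free pfn is 0x10003, ALIGN(0x10003, 8) = 0x10008, the
 * padding is 5 pfns, and the call consumes 24 + 5 pfns of the
 * reservation before returning pfn 0x10008.
 */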

static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%lu)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}

/*
 * As with alloc_block_buf(): during the early stage all callers must
 * request the same size.
 */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(size, altmap);
	return alloc_block_buf(size, node);
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
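
/*
 * An architecture with no special requirements can satisfy its
 * mandatory vmemmap_populate() hook by delegating to the base-page
 * walker above; a sketch of the arm64-style implementation:
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *				       unsigned long end, int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */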

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}
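
/*
 * Size check with example numbers: for 128MB sections and 4K pages,
 * PAGES_PER_SECTION = 32768; at 64 bytes per struct page a section's
 * map needs 32768 * 64 = 2MB, which is why the batch allocator below
 * rounds the per-section size up to PMD_SIZE and PMD-aligns the shared
 * buffer, keeping huge-page mappings possible.
 */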

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free the unused tail of the preallocated buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}