/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
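/*
 * Allocate one root's worth of mem_section descriptors (SECTIONS_PER_ROOT
 * entries): from the slab allocator once it is up, otherwise from memblock.
 */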
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kzalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kzalloc(array_size, GFP_KERNEL);
        } else {
                section = memblock_virt_alloc_node(array_size, nid);
        }

        return section;
}

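/*
 * Install the root array entry covering @section_nr, allocating it when the
 * first section in that root becomes present.
 */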
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
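/*
 * Map a mem_section pointer back to its section number by finding the root
 * array chunk that contains it.
 */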
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section* ms)
{
        return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                               unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map pointer such that the
 * identity "page - section_mem_map" yields the actual physical page
 * frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                               SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

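/*
 * Size in bytes of one section's pageblock flags bitmap, rounded up to a
 * whole number of unsigned longs.
 */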
unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = memblock_virt_alloc_try_nid_nopanic(size,
                                                SMP_CACHE_BYTES, goal, limit,
                                                nid);
        if (!p && limit) {
                limit = 0;
                goto again;
        }
        return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms tolerate an un-removable section because they
         * simply gather other removable sections for dynamic partitioning.
         * Just report the un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

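/*
 * Allocate the usemaps for all present sections of one node in a single
 * chunk and hand each present section its slice.
 */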
static void __init sparse_early_usemaps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        unsigned long **usemap_map = (unsigned long **)data;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
        if (!usemap) {
                pr_warn("%s: allocation failed\n", __func__);
                return;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = usemap;
                usemap += size;
                check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
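/*
 * Allocate the mem_map for one section: from the architecture's remap area
 * if it provides one, otherwise from memblock on the given node.
 */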
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;
        unsigned long size;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = memblock_virt_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
        return map;
}
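/*
 * Allocate the mem_maps for all present sections of one node in a single
 * chunk; fall back to per-section allocations if that fails.
 */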
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = memblock_virt_alloc_try_nid(size * map_count,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
                       __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        struct page **map_map = (struct page **)data;
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
               __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function to allocate usemap or memmap for a range of sections
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
                                        (void *, unsigned long, unsigned long,
                                        unsigned long, int), void *data)
{
        unsigned long pnum;
        unsigned long map_count;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of pnum_begin to pnum - 1 */
                alloc_func(data, pnum_begin, pnum,
                           map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
                   map_count, nodeid_begin);
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        int size2;
        struct page **map_map;
#endif

        /* see include/linux/mmzone.h 'struct mem_section' definition */
        BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        /*
         * Each mem_map uses a big page (2M on 64-bit x86), while a usemap
         * is much smaller than a page (about 24 bytes). Allocating 2M
         * (2M-aligned) and 24 bytes alternately would push each following
         * 2M allocation into the next 2M block, leaving a lot of holes on
         * a big system, so try to allocate the 2M mem_maps contiguously.
         *
         * powerpc needs to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                                (void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("can not allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
                                (void *)map_map);
#endif

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                        usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        memblock_free_early(__pa(map_map), size2);
#endif
        memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
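/*
 * Without vmemmap, get the section's memmap from the page allocator and
 * fall back to vmalloc if that fails.
 */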
static struct page *__kmalloc_section_memmap(void)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->freelist;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the section being removed
                 * has already been logically offlined, so all of its pages
                 * are isolated from the page allocator. If the removed
                 * section's memmap is placed on that same section, it must
                 * not be freed: the page allocator could hand it out again
                 * even though it is about to be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly
 * set. If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking here: sparse_index_init() does its own checking,
         * and it allocates memory, which may sleep.
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap);
        }
        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
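/*
 * Drop the HWPoison marks (and the poisoned-page count) for pages whose
 * memmap is about to go away with the section.
 */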
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap);
                return;
        }

        /*
         * The usemap came from bootmem. It is packed with the other usemaps
         * in the section that held the pgdat at boot time. Just keep it as
         * is for now.
         */

        if (memmap)
                free_map_bootmem(memmap);
}

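/*
 * Detach the section's mem_map and usemap under the pgdat resize lock,
 * clear any hwpoison accounting for them, then free the backing memory.
 */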
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
        struct pglist_data *pgdat = zone->zone_pgdat;

        pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                               __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
        pgdat_resize_unlock(pgdat, &flags);

        clear_hwpoisoned_pages(memmap + map_offset,
                               PAGES_PER_SECTION - map_offset);
        free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */
813 |