1 | /* |
2 | * mm/percpu.c - percpu memory allocator |
3 | * |
4 | * Copyright (C) 2009 SUSE Linux Products GmbH |
5 | * Copyright (C) 2009 Tejun Heo <tj@kernel.org> |
6 | * |
7 | * This file is released under the GPLv2. |
8 | * |
9 |  * This is the percpu allocator which can handle both static and |
10 |  * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk |
11 |  * consists of a boot-time determined number of units and the first |
12 |  * chunk is used for static percpu variables in the kernel image |
13 |  * (special boot time alloc/init handling is necessary as these areas |
14 |  * need to be brought up before allocation services are running). |
15 |  * A unit grows as necessary and all units grow or shrink in unison. |
16 | * When a chunk is filled up, another chunk is allocated. |
17 | * |
18 | * c0 c1 c2 |
19 | * ------------------- ------------------- ------------ |
20 | * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u |
21 | * ------------------- ...... ------------------- .... ------------ |
22 | * |
23 |  * Allocation is done in offset-size areas of single unit space.  I.e., |
24 |  * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, |
25 |  * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to |
26 | * cpus. On NUMA, the mapping can be non-linear and even sparse. |
27 | * Percpu access can be done by configuring percpu base registers |
28 | * according to cpu to unit mapping and pcpu_unit_size. |
29 | * |
30 |  * There are usually many small percpu allocations, many of them as |
31 |  * small as 4 bytes.  The allocator organizes chunks into lists |
32 | * according to free size and tries to allocate from the fullest one. |
33 | * Each chunk keeps the maximum contiguous area size hint which is |
34 | * guaranteed to be equal to or larger than the maximum contiguous |
35 |  * area in the chunk.  This helps the allocator avoid iterating over |
36 |  * the chunk maps unnecessarily. |
37 | * |
38 |  * Allocation state in each chunk is kept using an array of integers |
39 |  * on chunk->map.  Each entry is the byte offset of an area; its low |
40 |  * bit is set if the area is allocated and clear if it is free (see |
41 |  * the example below).  Allocation scans this map sequentially and |
42 |  * serves the first fitting area, mostly copied from percpu_modalloc(). |
43 | * Chunks can be determined from the address using the index field |
44 | * in the page struct. The index field contains a pointer to the chunk. |
45 | * |
46 |  * To use this allocator, arch code should do the following. |
47 | * |
48 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate |
49 | * regular address to percpu pointer and back if they need to be |
50 | * different from the default |
51 | * |
52 | * - use pcpu_setup_first_chunk() during percpu area initialization to |
53 | * setup the first chunk containing the kernel static percpu area |
54 | */ |
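/*
 * An illustration of the map encoding described above (hypothetical
 * numbers; real unit sizes are at least PCPU_MIN_UNIT_SIZE).  With a 4k
 * unit, the map
 *
 *	map[] = { 0 | 1, 128, 4096 | 1 };	map_used = 2;
 *
 * describes an allocated area [0, 128), a free area [128, 4096) and the
 * sentry entry terminating the unit at offset 4096.  Allocating 64 more
 * bytes marks entry 128 in-use and inserts a new free entry at 192.
 */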
55 | |
56 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
57 | |
58 | #include <linux/bitmap.h> |
59 | #include <linux/bootmem.h> |
60 | #include <linux/err.h> |
61 | #include <linux/list.h> |
62 | #include <linux/log2.h> |
63 | #include <linux/mm.h> |
64 | #include <linux/module.h> |
65 | #include <linux/mutex.h> |
66 | #include <linux/percpu.h> |
67 | #include <linux/pfn.h> |
68 | #include <linux/slab.h> |
69 | #include <linux/spinlock.h> |
70 | #include <linux/vmalloc.h> |
71 | #include <linux/workqueue.h> |
72 | #include <linux/kmemleak.h> |
73 | #include <linux/sched.h> |
74 | |
75 | #include <asm/cacheflush.h> |
76 | #include <asm/sections.h> |
77 | #include <asm/tlbflush.h> |
78 | #include <asm/io.h> |
79 | |
80 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ |
81 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ |
82 | #define PCPU_ATOMIC_MAP_MARGIN_LOW 32 |
83 | #define PCPU_ATOMIC_MAP_MARGIN_HIGH 64 |
84 | #define PCPU_EMPTY_POP_PAGES_LOW 2 |
85 | #define PCPU_EMPTY_POP_PAGES_HIGH 4 |
86 | |
87 | #ifdef CONFIG_SMP |
88 | /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ |
89 | #ifndef __addr_to_pcpu_ptr |
90 | #define __addr_to_pcpu_ptr(addr) \ |
91 | (void __percpu *)((unsigned long)(addr) - \ |
92 | (unsigned long)pcpu_base_addr + \ |
93 | (unsigned long)__per_cpu_start) |
94 | #endif |
95 | #ifndef __pcpu_ptr_to_addr |
96 | #define __pcpu_ptr_to_addr(ptr) \ |
97 | (void __force *)((unsigned long)(ptr) + \ |
98 | (unsigned long)pcpu_base_addr - \ |
99 | (unsigned long)__per_cpu_start) |
100 | #endif |
101 | #else /* CONFIG_SMP */ |
102 | /* on UP, it's always identity mapped */ |
103 | #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) |
104 | #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) |
105 | #endif /* CONFIG_SMP */ |
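/*
 * A sketch of the default translation above (SMP case), assuming an
 * allocation at byte offset @off within a chunk:
 *
 *	addr = chunk->base_addr + off;
 *	ptr  = __addr_to_pcpu_ptr(addr);
 *
 * @ptr is what pcpu_alloc() hands back to callers, and
 * __pcpu_ptr_to_addr(ptr) recovers @addr, as free_percpu() relies on.
 */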
106 | |
107 | struct pcpu_chunk { |
108 | struct list_head list; /* linked to pcpu_slot lists */ |
109 | int free_size; /* free bytes in the chunk */ |
110 | int contig_hint; /* max contiguous size hint */ |
111 | void *base_addr; /* base address of this chunk */ |
112 | |
113 | int map_used; /* # of map entries used before the sentry */ |
114 | int map_alloc; /* # of map entries allocated */ |
115 | int *map; /* allocation map */ |
116 | struct list_head map_extend_list;/* on pcpu_map_extend_chunks */ |
117 | |
118 | void *data; /* chunk data */ |
119 | int first_free; /* no free below this */ |
120 | bool immutable; /* no [de]population allowed */ |
121 | int nr_populated; /* # of populated pages */ |
122 | unsigned long populated[]; /* populated bitmap */ |
123 | }; |
124 | |
125 | static int pcpu_unit_pages __read_mostly; |
126 | static int pcpu_unit_size __read_mostly; |
127 | static int pcpu_nr_units __read_mostly; |
128 | static int pcpu_atom_size __read_mostly; |
129 | static int pcpu_nr_slots __read_mostly; |
130 | static size_t pcpu_chunk_struct_size __read_mostly; |
131 | |
132 | /* cpus with the lowest and highest unit addresses */ |
133 | static unsigned int pcpu_low_unit_cpu __read_mostly; |
134 | static unsigned int pcpu_high_unit_cpu __read_mostly; |
135 | |
136 | /* the address of the first chunk which starts with the kernel static area */ |
137 | void *pcpu_base_addr __read_mostly; |
138 | EXPORT_SYMBOL_GPL(pcpu_base_addr); |
139 | |
140 | static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ |
141 | const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */ |
142 | |
143 | /* group information, used for vm allocation */ |
144 | static int pcpu_nr_groups __read_mostly; |
145 | static const unsigned long *pcpu_group_offsets __read_mostly; |
146 | static const size_t *pcpu_group_sizes __read_mostly; |
147 | |
148 | /* |
149 | * The first chunk which always exists. Note that unlike other |
150 | * chunks, this one can be allocated and mapped in several different |
151 | * ways and thus often doesn't live in the vmalloc area. |
152 | */ |
153 | static struct pcpu_chunk *pcpu_first_chunk; |
154 | |
155 | /* |
156 | * Optional reserved chunk. This chunk reserves part of the first |
157 |  * chunk and serves it for reserved allocations.  The end offset of |
158 |  * the reserved area is kept in pcpu_reserved_chunk_limit.  When the |
159 |  * reserved area doesn't exist, the following variables contain NULL |
160 |  * and 0 respectively. |
161 | */ |
162 | static struct pcpu_chunk *pcpu_reserved_chunk; |
163 | static int pcpu_reserved_chunk_limit; |
164 | |
165 | static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ |
166 | static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ |
167 | |
168 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ |
169 | |
170 | /* chunks which need their map areas extended, protected by pcpu_lock */ |
171 | static LIST_HEAD(pcpu_map_extend_chunks); |
172 | |
173 | /* |
174 | * The number of empty populated pages, protected by pcpu_lock. The |
175 | * reserved chunk doesn't contribute to the count. |
176 | */ |
177 | static int pcpu_nr_empty_pop_pages; |
178 | |
179 | /* |
180 | * Balance work is used to populate or destroy chunks asynchronously. We |
181 | * try to keep the number of populated free pages between |
182 | * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one |
183 | * empty chunk. |
184 | */ |
185 | static void pcpu_balance_workfn(struct work_struct *work); |
186 | static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn); |
187 | static bool pcpu_async_enabled __read_mostly; |
188 | static bool pcpu_atomic_alloc_failed; |
189 | |
190 | static void pcpu_schedule_balance_work(void) |
191 | { |
192 | if (pcpu_async_enabled) |
193 | schedule_work(&pcpu_balance_work); |
194 | } |
195 | |
196 | static bool pcpu_addr_in_first_chunk(void *addr) |
197 | { |
198 | void *first_start = pcpu_first_chunk->base_addr; |
199 | |
200 | return addr >= first_start && addr < first_start + pcpu_unit_size; |
201 | } |
202 | |
203 | static bool pcpu_addr_in_reserved_chunk(void *addr) |
204 | { |
205 | void *first_start = pcpu_first_chunk->base_addr; |
206 | |
207 | return addr >= first_start && |
208 | addr < first_start + pcpu_reserved_chunk_limit; |
209 | } |
210 | |
211 | static int __pcpu_size_to_slot(int size) |
212 | { |
213 | int highbit = fls(size); /* size is in bytes */ |
214 | return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); |
215 | } |
216 | |
217 | static int pcpu_size_to_slot(int size) |
218 | { |
219 | if (size == pcpu_unit_size) |
220 | return pcpu_nr_slots - 1; |
221 | return __pcpu_size_to_slot(size); |
222 | } |
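/*
 * Illustrative arithmetic: a 200 byte request has fls(200) == 8, so
 * __pcpu_size_to_slot() returns max(8 - PCPU_SLOT_BASE_SHIFT + 2, 1) == 5
 * and the allocator starts its search from slot 5.
 */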
223 | |
224 | static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) |
225 | { |
226 | if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) |
227 | return 0; |
228 | |
229 | return pcpu_size_to_slot(chunk->free_size); |
230 | } |
231 | |
232 | /* set the pointer to a chunk in a page struct */ |
233 | static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) |
234 | { |
235 | page->index = (unsigned long)pcpu; |
236 | } |
237 | |
238 | /* obtain pointer to a chunk from a page struct */ |
239 | static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) |
240 | { |
241 | return (struct pcpu_chunk *)page->index; |
242 | } |
243 | |
244 | static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) |
245 | { |
246 | return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; |
247 | } |
248 | |
249 | static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, |
250 | unsigned int cpu, int page_idx) |
251 | { |
252 | return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + |
253 | (page_idx << PAGE_SHIFT); |
254 | } |
255 | |
256 | static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk, |
257 | int *rs, int *re, int end) |
258 | { |
259 | *rs = find_next_zero_bit(chunk->populated, end, *rs); |
260 | *re = find_next_bit(chunk->populated, end, *rs + 1); |
261 | } |
262 | |
263 | static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, |
264 | int *rs, int *re, int end) |
265 | { |
266 | *rs = find_next_bit(chunk->populated, end, *rs); |
267 | *re = find_next_zero_bit(chunk->populated, end, *rs + 1); |
268 | } |
269 | |
270 | /* |
271 | * (Un)populated page region iterators. Iterate over (un)populated |
272 | * page regions between @start and @end in @chunk. @rs and @re should |
273 | * be integer variables and will be set to start and end page index of |
274 | * the current region. |
275 | */ |
276 | #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ |
277 | for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ |
278 | (rs) < (re); \ |
279 | (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) |
280 | |
281 | #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ |
282 | for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ |
283 | (rs) < (re); \ |
284 | (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) |
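/*
 * For example (hypothetical bitmap): with pages 0-1 and 4 populated and
 * pages 2-3 unpopulated, pcpu_for_each_unpop_region(chunk, rs, re, 0, 5)
 * visits the single region rs == 2, re == 4.
 */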
285 | |
286 | /** |
287 | * pcpu_mem_zalloc - allocate memory |
288 | * @size: bytes to allocate |
289 | * |
290 | * Allocate @size bytes. If @size is smaller than PAGE_SIZE, |
291 | * kzalloc() is used; otherwise, vzalloc() is used. The returned |
292 | * memory is always zeroed. |
293 | * |
294 | * CONTEXT: |
295 | * Does GFP_KERNEL allocation. |
296 | * |
297 | * RETURNS: |
298 | * Pointer to the allocated area on success, NULL on failure. |
299 | */ |
300 | static void *pcpu_mem_zalloc(size_t size) |
301 | { |
302 | if (WARN_ON_ONCE(!slab_is_available())) |
303 | return NULL; |
304 | |
305 | if (size <= PAGE_SIZE) |
306 | return kzalloc(size, GFP_KERNEL); |
307 | else |
308 | return vzalloc(size); |
309 | } |
310 | |
311 | /** |
312 | * pcpu_mem_free - free memory |
313 | * @ptr: memory to free |
314 | * |
315 | * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). |
316 | */ |
317 | static void pcpu_mem_free(void *ptr) |
318 | { |
319 | kvfree(ptr); |
320 | } |
321 | |
322 | /** |
323 | * pcpu_count_occupied_pages - count the number of pages an area occupies |
324 | * @chunk: chunk of interest |
325 | * @i: index of the area in question |
326 | * |
327 | * Count the number of pages chunk's @i'th area occupies. When the area's |
328 | * start and/or end address isn't aligned to page boundary, the straddled |
329 | * page is included in the count iff the rest of the page is free. |
330 | */ |
331 | static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i) |
332 | { |
333 | int off = chunk->map[i] & ~1; |
334 | int end = chunk->map[i + 1] & ~1; |
335 | |
336 | if (!PAGE_ALIGNED(off) && i > 0) { |
337 | int prev = chunk->map[i - 1]; |
338 | |
339 | if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE)) |
340 | off = round_down(off, PAGE_SIZE); |
341 | } |
342 | |
343 | if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) { |
344 | int next = chunk->map[i + 1]; |
345 | int nend = chunk->map[i + 2] & ~1; |
346 | |
347 | if (!(next & 1) && nend >= round_up(end, PAGE_SIZE)) |
348 | end = round_up(end, PAGE_SIZE); |
349 | } |
350 | |
351 | return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0); |
352 | } |
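/*
 * Example (hypothetical offsets, 4k pages): for an area [300, 8192)
 * whose previous neighbour is a free area starting at offset 0, @off is
 * rounded down to 0 and the result is PFN_DOWN(8192) - PFN_UP(0) == 2:
 * the partially covered first page counts because the rest of it is
 * free, and the second page is covered entirely.
 */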
353 | |
354 | /** |
355 | * pcpu_chunk_relocate - put chunk in the appropriate chunk slot |
356 | * @chunk: chunk of interest |
357 | * @oslot: the previous slot it was on |
358 | * |
359 | * This function is called after an allocation or free changed @chunk. |
360 | * New slot according to the changed state is determined and @chunk is |
361 | * moved to the slot. Note that the reserved chunk is never put on |
362 | * chunk slots. |
363 | * |
364 | * CONTEXT: |
365 | * pcpu_lock. |
366 | */ |
367 | static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) |
368 | { |
369 | int nslot = pcpu_chunk_slot(chunk); |
370 | |
371 | if (chunk != pcpu_reserved_chunk && oslot != nslot) { |
372 | if (oslot < nslot) |
373 | list_move(&chunk->list, &pcpu_slot[nslot]); |
374 | else |
375 | list_move_tail(&chunk->list, &pcpu_slot[nslot]); |
376 | } |
377 | } |
378 | |
379 | /** |
380 | * pcpu_need_to_extend - determine whether chunk area map needs to be extended |
381 | * @chunk: chunk of interest |
382 | * @is_atomic: the allocation context |
383 | * |
384 | * Determine whether area map of @chunk needs to be extended. If |
385 | * @is_atomic, only the amount necessary for a new allocation is |
386 |  * considered; however, async extension is scheduled if the remaining |
387 |  * headroom is low.  If !@is_atomic, it aims for more empty space.  Combined, |
388 |  * this ensures that the map is likely to have enough available space to |
389 |  * accommodate atomic allocations which can't extend maps directly. |
390 | * |
391 | * CONTEXT: |
392 | * pcpu_lock. |
393 | * |
394 | * RETURNS: |
395 | * New target map allocation length if extension is necessary, 0 |
396 | * otherwise. |
397 | */ |
398 | static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic) |
399 | { |
400 | int margin, new_alloc; |
401 | |
402 | lockdep_assert_held(&pcpu_lock); |
403 | |
404 | if (is_atomic) { |
405 | margin = 3; |
406 | |
407 | if (chunk->map_alloc < |
408 | chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) { |
409 | if (list_empty(&chunk->map_extend_list)) { |
410 | list_add_tail(&chunk->map_extend_list, |
411 | &pcpu_map_extend_chunks); |
412 | pcpu_schedule_balance_work(); |
413 | } |
414 | } |
415 | } else { |
416 | margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; |
417 | } |
418 | |
419 | if (chunk->map_alloc >= chunk->map_used + margin) |
420 | return 0; |
421 | |
422 | new_alloc = PCPU_DFL_MAP_ALLOC; |
423 | while (new_alloc < chunk->map_used + margin) |
424 | new_alloc *= 2; |
425 | |
426 | return new_alloc; |
427 | } |
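/*
 * Example (hypothetical counts): a chunk with map_used == 50 and
 * map_alloc == 64 satisfies an atomic request (margin 3, 64 >= 53) but
 * still queues an async extension because 64 < 50 +
 * PCPU_ATOMIC_MAP_MARGIN_LOW; a !@is_atomic caller (margin
 * PCPU_ATOMIC_MAP_MARGIN_HIGH) gets a new target of 128, i.e.
 * PCPU_DFL_MAP_ALLOC doubled until it covers 50 + 64.
 */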
428 | |
429 | /** |
430 | * pcpu_extend_area_map - extend area map of a chunk |
431 | * @chunk: chunk of interest |
432 | * @new_alloc: new target allocation length of the area map |
433 | * |
434 | * Extend area map of @chunk to have @new_alloc entries. |
435 | * |
436 | * CONTEXT: |
437 | * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock. |
438 | * |
439 | * RETURNS: |
440 | * 0 on success, -errno on failure. |
441 | */ |
442 | static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) |
443 | { |
444 | int *old = NULL, *new = NULL; |
445 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); |
446 | unsigned long flags; |
447 | |
448 | lockdep_assert_held(&pcpu_alloc_mutex); |
449 | |
450 | new = pcpu_mem_zalloc(new_size); |
451 | if (!new) |
452 | return -ENOMEM; |
453 | |
454 | /* acquire pcpu_lock and switch to new area map */ |
455 | spin_lock_irqsave(&pcpu_lock, flags); |
456 | |
457 | if (new_alloc <= chunk->map_alloc) |
458 | goto out_unlock; |
459 | |
460 | old_size = chunk->map_alloc * sizeof(chunk->map[0]); |
461 | old = chunk->map; |
462 | |
463 | memcpy(new, old, old_size); |
464 | |
465 | chunk->map_alloc = new_alloc; |
466 | chunk->map = new; |
467 | new = NULL; |
468 | |
469 | out_unlock: |
470 | spin_unlock_irqrestore(&pcpu_lock, flags); |
471 | |
472 | /* |
473 | * pcpu_mem_free() might end up calling vfree() which uses |
474 | * IRQ-unsafe lock and thus can't be called under pcpu_lock. |
475 | */ |
476 | pcpu_mem_free(old); |
477 | pcpu_mem_free(new); |
478 | |
479 | return 0; |
480 | } |
481 | |
482 | /** |
483 | * pcpu_fit_in_area - try to fit the requested allocation in a candidate area |
484 | * @chunk: chunk the candidate area belongs to |
485 | * @off: the offset to the start of the candidate area |
486 | * @this_size: the size of the candidate area |
487 | * @size: the size of the target allocation |
488 | * @align: the alignment of the target allocation |
489 | * @pop_only: only allocate from already populated region |
490 | * |
491 | * We're trying to allocate @size bytes aligned at @align. @chunk's area |
492 | * at @off sized @this_size is a candidate. This function determines |
493 | * whether the target allocation fits in the candidate area and returns the |
494 | * number of bytes to pad after @off. If the target area doesn't fit, -1 |
495 | * is returned. |
496 | * |
497 | * If @pop_only is %true, this function only considers the already |
498 | * populated part of the candidate area. |
499 | */ |
500 | static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size, |
501 | int size, int align, bool pop_only) |
502 | { |
503 | int cand_off = off; |
504 | |
505 | while (true) { |
506 | int head = ALIGN(cand_off, align) - off; |
507 | int page_start, page_end, rs, re; |
508 | |
509 | if (this_size < head + size) |
510 | return -1; |
511 | |
512 | if (!pop_only) |
513 | return head; |
514 | |
515 | /* |
516 | * If the first unpopulated page is beyond the end of the |
517 | * allocation, the whole allocation is populated; |
518 | * otherwise, retry from the end of the unpopulated area. |
519 | */ |
520 | page_start = PFN_DOWN(head + off); |
521 | page_end = PFN_UP(head + off + size); |
522 | |
523 | rs = page_start; |
524 | pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size)); |
525 | if (rs >= page_end) |
526 | return head; |
527 | cand_off = re * PAGE_SIZE; |
528 | } |
529 | } |
530 | |
531 | /** |
532 | * pcpu_alloc_area - allocate area from a pcpu_chunk |
533 | * @chunk: chunk of interest |
534 | * @size: wanted size in bytes |
535 | * @align: wanted align |
536 | * @pop_only: allocate only from the populated area |
537 | * @occ_pages_p: out param for the number of pages the area occupies |
538 | * |
539 | * Try to allocate @size bytes area aligned at @align from @chunk. |
540 | * Note that this function only allocates the offset. It doesn't |
541 | * populate or map the area. |
542 | * |
543 | * @chunk->map must have at least two free slots. |
544 | * |
545 | * CONTEXT: |
546 | * pcpu_lock. |
547 | * |
548 | * RETURNS: |
549 | * Allocated offset in @chunk on success, -1 if no matching area is |
550 | * found. |
551 | */ |
552 | static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align, |
553 | bool pop_only, int *occ_pages_p) |
554 | { |
555 | int oslot = pcpu_chunk_slot(chunk); |
556 | int max_contig = 0; |
557 | int i, off; |
558 | bool seen_free = false; |
559 | int *p; |
560 | |
561 | for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) { |
562 | int head, tail; |
563 | int this_size; |
564 | |
565 | off = *p; |
566 | if (off & 1) |
567 | continue; |
568 | |
569 | this_size = (p[1] & ~1) - off; |
570 | |
571 | head = pcpu_fit_in_area(chunk, off, this_size, size, align, |
572 | pop_only); |
573 | if (head < 0) { |
574 | if (!seen_free) { |
575 | chunk->first_free = i; |
576 | seen_free = true; |
577 | } |
578 | max_contig = max(this_size, max_contig); |
579 | continue; |
580 | } |
581 | |
582 | /* |
583 | * If head is small or the previous block is free, |
584 | * merge'em. Note that 'small' is defined as smaller |
585 | * than sizeof(int), which is very small but isn't too |
586 | * uncommon for percpu allocations. |
587 | */ |
588 | if (head && (head < sizeof(int) || !(p[-1] & 1))) { |
589 | *p = off += head; |
590 | if (p[-1] & 1) |
591 | chunk->free_size -= head; |
592 | else |
593 | max_contig = max(*p - p[-1], max_contig); |
594 | this_size -= head; |
595 | head = 0; |
596 | } |
597 | |
598 | /* if tail is small, just keep it around */ |
599 | tail = this_size - head - size; |
600 | if (tail < sizeof(int)) { |
601 | tail = 0; |
602 | size = this_size - head; |
603 | } |
604 | |
605 | /* split if warranted */ |
606 | if (head || tail) { |
607 | int nr_extra = !!head + !!tail; |
608 | |
609 | /* insert new subblocks */ |
610 | memmove(p + nr_extra + 1, p + 1, |
611 | sizeof(chunk->map[0]) * (chunk->map_used - i)); |
612 | chunk->map_used += nr_extra; |
613 | |
614 | if (head) { |
615 | if (!seen_free) { |
616 | chunk->first_free = i; |
617 | seen_free = true; |
618 | } |
619 | *++p = off += head; |
620 | ++i; |
621 | max_contig = max(head, max_contig); |
622 | } |
623 | if (tail) { |
624 | p[1] = off + size; |
625 | max_contig = max(tail, max_contig); |
626 | } |
627 | } |
628 | |
629 | if (!seen_free) |
630 | chunk->first_free = i + 1; |
631 | |
632 | /* update hint and mark allocated */ |
633 | if (i + 1 == chunk->map_used) |
634 | chunk->contig_hint = max_contig; /* fully scanned */ |
635 | else |
636 | chunk->contig_hint = max(chunk->contig_hint, |
637 | max_contig); |
638 | |
639 | chunk->free_size -= size; |
640 | *p |= 1; |
641 | |
642 | *occ_pages_p = pcpu_count_occupied_pages(chunk, i); |
643 | pcpu_chunk_relocate(chunk, oslot); |
644 | return off; |
645 | } |
646 | |
647 | chunk->contig_hint = max_contig; /* fully scanned */ |
648 | pcpu_chunk_relocate(chunk, oslot); |
649 | |
650 | /* tell the upper layer that this chunk has no matching area */ |
651 | return -1; |
652 | } |
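/*
 * A worked example (hypothetical 4k unit): starting from the map
 *
 *	map[] = { 0 | 1, 128, 4096 | 1 };	map_used = 2;
 *
 * pcpu_alloc_area(chunk, 64, 64, false, &pages) finds the free area at
 * offset 128, needs no head padding, splits off a 3904 byte free tail
 * and leaves
 *
 *	map[] = { 0 | 1, 128 | 1, 192, 4096 | 1 };	map_used = 3;
 *
 * returning 128.
 */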
653 | |
654 | /** |
655 | * pcpu_free_area - free area to a pcpu_chunk |
656 | * @chunk: chunk of interest |
657 | * @freeme: offset of area to free |
658 | * @occ_pages_p: out param for the number of pages the area occupies |
659 | * |
660 | * Free area starting from @freeme to @chunk. Note that this function |
661 | * only modifies the allocation map. It doesn't depopulate or unmap |
662 | * the area. |
663 | * |
664 | * CONTEXT: |
665 | * pcpu_lock. |
666 | */ |
667 | static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme, |
668 | int *occ_pages_p) |
669 | { |
670 | int oslot = pcpu_chunk_slot(chunk); |
671 | int off = 0; |
672 | unsigned i, j; |
673 | int to_free = 0; |
674 | int *p; |
675 | |
676 | freeme |= 1; /* we are searching for <given offset, in use> pair */ |
677 | |
678 | i = 0; |
679 | j = chunk->map_used; |
680 | while (i != j) { |
681 | unsigned k = (i + j) / 2; |
682 | off = chunk->map[k]; |
683 | if (off < freeme) |
684 | i = k + 1; |
685 | else if (off > freeme) |
686 | j = k; |
687 | else |
688 | i = j = k; |
689 | } |
690 | BUG_ON(off != freeme); |
691 | |
692 | if (i < chunk->first_free) |
693 | chunk->first_free = i; |
694 | |
695 | p = chunk->map + i; |
696 | *p = off &= ~1; |
697 | chunk->free_size += (p[1] & ~1) - off; |
698 | |
699 | *occ_pages_p = pcpu_count_occupied_pages(chunk, i); |
700 | |
701 | /* merge with next? */ |
702 | if (!(p[1] & 1)) |
703 | to_free++; |
704 | /* merge with previous? */ |
705 | if (i > 0 && !(p[-1] & 1)) { |
706 | to_free++; |
707 | i--; |
708 | p--; |
709 | } |
710 | if (to_free) { |
711 | chunk->map_used -= to_free; |
712 | memmove(p + 1, p + 1 + to_free, |
713 | (chunk->map_used - i) * sizeof(chunk->map[0])); |
714 | } |
715 | |
716 | chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint); |
717 | pcpu_chunk_relocate(chunk, oslot); |
718 | } |
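/*
 * Continuing the allocation example above, pcpu_free_area(chunk, 128,
 * &pages) clears the in-use bit of entry 128 and, because the following
 * entry at 192 is also free, merges the two areas back into
 *
 *	map[] = { 0 | 1, 128, 4096 | 1 };	map_used = 2;
 */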
719 | |
720 | static struct pcpu_chunk *pcpu_alloc_chunk(void) |
721 | { |
722 | struct pcpu_chunk *chunk; |
723 | |
724 | chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); |
725 | if (!chunk) |
726 | return NULL; |
727 | |
728 | chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * |
729 | sizeof(chunk->map[0])); |
730 | if (!chunk->map) { |
731 | pcpu_mem_free(chunk); |
732 | return NULL; |
733 | } |
734 | |
735 | chunk->map_alloc = PCPU_DFL_MAP_ALLOC; |
736 | chunk->map[0] = 0; |
737 | chunk->map[1] = pcpu_unit_size | 1; |
738 | chunk->map_used = 1; |
739 | |
740 | INIT_LIST_HEAD(&chunk->list); |
741 | INIT_LIST_HEAD(&chunk->map_extend_list); |
742 | chunk->free_size = pcpu_unit_size; |
743 | chunk->contig_hint = pcpu_unit_size; |
744 | |
745 | return chunk; |
746 | } |
747 | |
748 | static void pcpu_free_chunk(struct pcpu_chunk *chunk) |
749 | { |
750 | if (!chunk) |
751 | return; |
752 | pcpu_mem_free(chunk->map); |
753 | pcpu_mem_free(chunk); |
754 | } |
755 | |
756 | /** |
757 | * pcpu_chunk_populated - post-population bookkeeping |
758 | * @chunk: pcpu_chunk which got populated |
759 | * @page_start: the start page |
760 | * @page_end: the end page |
761 | * |
762 | * Pages in [@page_start,@page_end) have been populated to @chunk. Update |
763 | * the bookkeeping information accordingly. Must be called after each |
764 | * successful population. |
765 | */ |
766 | static void pcpu_chunk_populated(struct pcpu_chunk *chunk, |
767 | int page_start, int page_end) |
768 | { |
769 | int nr = page_end - page_start; |
770 | |
771 | lockdep_assert_held(&pcpu_lock); |
772 | |
773 | bitmap_set(chunk->populated, page_start, nr); |
774 | chunk->nr_populated += nr; |
775 | pcpu_nr_empty_pop_pages += nr; |
776 | } |
777 | |
778 | /** |
779 | * pcpu_chunk_depopulated - post-depopulation bookkeeping |
780 | * @chunk: pcpu_chunk which got depopulated |
781 | * @page_start: the start page |
782 | * @page_end: the end page |
783 | * |
784 | * Pages in [@page_start,@page_end) have been depopulated from @chunk. |
785 | * Update the bookkeeping information accordingly. Must be called after |
786 | * each successful depopulation. |
787 | */ |
788 | static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, |
789 | int page_start, int page_end) |
790 | { |
791 | int nr = page_end - page_start; |
792 | |
793 | lockdep_assert_held(&pcpu_lock); |
794 | |
795 | bitmap_clear(chunk->populated, page_start, nr); |
796 | chunk->nr_populated -= nr; |
797 | pcpu_nr_empty_pop_pages -= nr; |
798 | } |
799 | |
800 | /* |
801 | * Chunk management implementation. |
802 | * |
803 | * To allow different implementations, chunk alloc/free and |
804 | * [de]population are implemented in a separate file which is pulled |
805 | * into this file and compiled together. The following functions |
806 | * should be implemented. |
807 | * |
808 | * pcpu_populate_chunk - populate the specified range of a chunk |
809 | * pcpu_depopulate_chunk - depopulate the specified range of a chunk |
810 | * pcpu_create_chunk - create a new chunk |
811 | * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop |
812 |  * pcpu_addr_to_page - translate address to the corresponding struct page |
813 | * pcpu_verify_alloc_info - check alloc_info is acceptable during init |
814 | */ |
815 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size); |
816 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size); |
817 | static struct pcpu_chunk *pcpu_create_chunk(void); |
818 | static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); |
819 | static struct page *pcpu_addr_to_page(void *addr); |
820 | static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); |
821 | |
822 | #ifdef CONFIG_NEED_PER_CPU_KM |
823 | #include "percpu-km.c" |
824 | #else |
825 | #include "percpu-vm.c" |
826 | #endif |
827 | |
828 | /** |
829 | * pcpu_chunk_addr_search - determine chunk containing specified address |
830 | * @addr: address for which the chunk needs to be determined. |
831 | * |
832 | * RETURNS: |
833 | * The address of the found chunk. |
834 | */ |
835 | static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) |
836 | { |
837 | /* is it in the first chunk? */ |
838 | if (pcpu_addr_in_first_chunk(addr)) { |
839 | /* is it in the reserved area? */ |
840 | if (pcpu_addr_in_reserved_chunk(addr)) |
841 | return pcpu_reserved_chunk; |
842 | return pcpu_first_chunk; |
843 | } |
844 | |
845 | /* |
846 | * The address is relative to unit0 which might be unused and |
847 | * thus unmapped. Offset the address to the unit space of the |
848 | * current processor before looking it up in the vmalloc |
849 | * space. Note that any possible cpu id can be used here, so |
850 | * there's no need to worry about preemption or cpu hotplug. |
851 | */ |
852 | addr += pcpu_unit_offsets[raw_smp_processor_id()]; |
853 | return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); |
854 | } |
855 | |
856 | /** |
857 | * pcpu_alloc - the percpu allocator |
858 | * @size: size of area to allocate in bytes |
859 | * @align: alignment of area (max PAGE_SIZE) |
860 | * @reserved: allocate from the reserved chunk if available |
861 | * @gfp: allocation flags |
862 | * |
863 | * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't |
864 | * contain %GFP_KERNEL, the allocation is atomic. |
865 | * |
866 | * RETURNS: |
867 | * Percpu pointer to the allocated area on success, NULL on failure. |
868 | */ |
869 | static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, |
870 | gfp_t gfp) |
871 | { |
872 | static int warn_limit = 10; |
873 | struct pcpu_chunk *chunk; |
874 | const char *err; |
875 | bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; |
876 | int occ_pages = 0; |
877 | int slot, off, new_alloc, cpu, ret; |
878 | unsigned long flags; |
879 | void __percpu *ptr; |
880 | |
881 | /* |
882 | * We want the lowest bit of offset available for in-use/free |
883 | * indicator, so force >= 16bit alignment and make size even. |
884 | */ |
885 | if (unlikely(align < 2)) |
886 | align = 2; |
887 | |
888 | size = ALIGN(size, 2); |
889 | |
890 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { |
891 | WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", |
892 | size, align); |
893 | return NULL; |
894 | } |
895 | |
896 | if (!is_atomic) |
897 | mutex_lock(&pcpu_alloc_mutex); |
898 | |
899 | spin_lock_irqsave(&pcpu_lock, flags); |
900 | |
901 | /* serve reserved allocations from the reserved chunk if available */ |
902 | if (reserved && pcpu_reserved_chunk) { |
903 | chunk = pcpu_reserved_chunk; |
904 | |
905 | if (size > chunk->contig_hint) { |
906 | err = "alloc from reserved chunk failed"; |
907 | goto fail_unlock; |
908 | } |
909 | |
910 | while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) { |
911 | spin_unlock_irqrestore(&pcpu_lock, flags); |
912 | if (is_atomic || |
913 | pcpu_extend_area_map(chunk, new_alloc) < 0) { |
914 | err = "failed to extend area map of reserved chunk"; |
915 | goto fail; |
916 | } |
917 | spin_lock_irqsave(&pcpu_lock, flags); |
918 | } |
919 | |
920 | off = pcpu_alloc_area(chunk, size, align, is_atomic, |
921 | &occ_pages); |
922 | if (off >= 0) |
923 | goto area_found; |
924 | |
925 | err = "alloc from reserved chunk failed"; |
926 | goto fail_unlock; |
927 | } |
928 | |
929 | restart: |
930 | /* search through normal chunks */ |
931 | for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { |
932 | list_for_each_entry(chunk, &pcpu_slot[slot], list) { |
933 | if (size > chunk->contig_hint) |
934 | continue; |
935 | |
936 | new_alloc = pcpu_need_to_extend(chunk, is_atomic); |
937 | if (new_alloc) { |
938 | if (is_atomic) |
939 | continue; |
940 | spin_unlock_irqrestore(&pcpu_lock, flags); |
941 | if (pcpu_extend_area_map(chunk, |
942 | new_alloc) < 0) { |
943 | err = "failed to extend area map"; |
944 | goto fail; |
945 | } |
946 | spin_lock_irqsave(&pcpu_lock, flags); |
947 | /* |
948 | * pcpu_lock has been dropped, need to |
949 | * restart pcpu_slot list walking. |
950 | */ |
951 | goto restart; |
952 | } |
953 | |
954 | off = pcpu_alloc_area(chunk, size, align, is_atomic, |
955 | &occ_pages); |
956 | if (off >= 0) |
957 | goto area_found; |
958 | } |
959 | } |
960 | |
961 | spin_unlock_irqrestore(&pcpu_lock, flags); |
962 | |
963 | /* |
964 | * No space left. Create a new chunk. We don't want multiple |
965 | * tasks to create chunks simultaneously. Serialize and create iff |
966 | * there's still no empty chunk after grabbing the mutex. |
967 | */ |
968 | if (is_atomic) |
969 | goto fail; |
970 | |
971 | if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { |
972 | chunk = pcpu_create_chunk(); |
973 | if (!chunk) { |
974 | err = "failed to allocate new chunk"; |
975 | goto fail; |
976 | } |
977 | |
978 | spin_lock_irqsave(&pcpu_lock, flags); |
979 | pcpu_chunk_relocate(chunk, -1); |
980 | } else { |
981 | spin_lock_irqsave(&pcpu_lock, flags); |
982 | } |
983 | |
984 | goto restart; |
985 | |
986 | area_found: |
987 | spin_unlock_irqrestore(&pcpu_lock, flags); |
988 | |
989 | /* populate if not all pages are already there */ |
990 | if (!is_atomic) { |
991 | int page_start, page_end, rs, re; |
992 | |
993 | page_start = PFN_DOWN(off); |
994 | page_end = PFN_UP(off + size); |
995 | |
996 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { |
997 | WARN_ON(chunk->immutable); |
998 | |
999 | ret = pcpu_populate_chunk(chunk, rs, re); |
1000 | |
1001 | spin_lock_irqsave(&pcpu_lock, flags); |
1002 | if (ret) { |
1003 | pcpu_free_area(chunk, off, &occ_pages); |
1004 | err = "failed to populate"; |
1005 | goto fail_unlock; |
1006 | } |
1007 | pcpu_chunk_populated(chunk, rs, re); |
1008 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1009 | } |
1010 | |
1011 | mutex_unlock(&pcpu_alloc_mutex); |
1012 | } |
1013 | |
1014 | if (chunk != pcpu_reserved_chunk) { |
1015 | spin_lock_irqsave(&pcpu_lock, flags); |
1016 | pcpu_nr_empty_pop_pages -= occ_pages; |
1017 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1018 | } |
1019 | |
1020 | if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) |
1021 | pcpu_schedule_balance_work(); |
1022 | |
1023 | /* clear the areas and return address relative to base address */ |
1024 | for_each_possible_cpu(cpu) |
1025 | memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); |
1026 | |
1027 | ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); |
1028 | kmemleak_alloc_percpu(ptr, size, gfp); |
1029 | return ptr; |
1030 | |
1031 | fail_unlock: |
1032 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1033 | fail: |
1034 | if (!is_atomic && warn_limit) { |
1035 | pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", |
1036 | size, align, is_atomic, err); |
1037 | dump_stack(); |
1038 | if (!--warn_limit) |
1039 | pr_info("limit reached, disable warning\n"); |
1040 | } |
1041 | if (is_atomic) { |
1042 | /* see the flag handling in pcpu_balance_workfn() */ |
1043 | pcpu_atomic_alloc_failed = true; |
1044 | pcpu_schedule_balance_work(); |
1045 | } else { |
1046 | mutex_unlock(&pcpu_alloc_mutex); |
1047 | } |
1048 | return NULL; |
1049 | } |
1050 | |
1051 | /** |
1052 | * __alloc_percpu_gfp - allocate dynamic percpu area |
1053 | * @size: size of area to allocate in bytes |
1054 | * @align: alignment of area (max PAGE_SIZE) |
1055 | * @gfp: allocation flags |
1056 | * |
1057 | * Allocate zero-filled percpu area of @size bytes aligned at @align. If |
1058 | * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can |
1059 | * be called from any context but is a lot more likely to fail. |
1060 | * |
1061 | * RETURNS: |
1062 | * Percpu pointer to the allocated area on success, NULL on failure. |
1063 | */ |
1064 | void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) |
1065 | { |
1066 | return pcpu_alloc(size, align, false, gfp); |
1067 | } |
1068 | EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); |
1069 | |
1070 | /** |
1071 | * __alloc_percpu - allocate dynamic percpu area |
1072 | * @size: size of area to allocate in bytes |
1073 | * @align: alignment of area (max PAGE_SIZE) |
1074 | * |
1075 | * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). |
1076 | */ |
1077 | void __percpu *__alloc_percpu(size_t size, size_t align) |
1078 | { |
1079 | return pcpu_alloc(size, align, false, GFP_KERNEL); |
1080 | } |
1081 | EXPORT_SYMBOL_GPL(__alloc_percpu); |
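/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	unsigned long __percpu *counters;
 *
 *	counters = __alloc_percpu(sizeof(*counters), __alignof__(*counters));
 *	if (!counters)
 *		return -ENOMEM;
 *	this_cpu_inc(*counters);
 *	...
 *	free_percpu(counters);
 */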
1082 | |
1083 | /** |
1084 | * __alloc_reserved_percpu - allocate reserved percpu area |
1085 | * @size: size of area to allocate in bytes |
1086 | * @align: alignment of area (max PAGE_SIZE) |
1087 | * |
1088 | * Allocate zero-filled percpu area of @size bytes aligned at @align |
1089 | * from reserved percpu area if arch has set it up; otherwise, |
1090 | * allocation is served from the same dynamic area. Might sleep. |
1091 | * Might trigger writeouts. |
1092 | * |
1093 | * CONTEXT: |
1094 | * Does GFP_KERNEL allocation. |
1095 | * |
1096 | * RETURNS: |
1097 | * Percpu pointer to the allocated area on success, NULL on failure. |
1098 | */ |
1099 | void __percpu *__alloc_reserved_percpu(size_t size, size_t align) |
1100 | { |
1101 | return pcpu_alloc(size, align, true, GFP_KERNEL); |
1102 | } |
1103 | |
1104 | /** |
1105 | * pcpu_balance_workfn - manage the amount of free chunks and populated pages |
1106 | * @work: unused |
1107 | * |
1108 | * Reclaim all fully free chunks except for the first one. |
1109 |  * Reclaim all fully free chunks except for the first one. |
 */
1110 | static void pcpu_balance_workfn(struct work_struct *work) |
1111 | { |
1112 | LIST_HEAD(to_free); |
1113 | struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; |
1114 | struct pcpu_chunk *chunk, *next; |
1115 | int slot, nr_to_pop, ret; |
1116 | |
1117 | /* |
1118 | * There's no reason to keep around multiple unused chunks and VM |
1119 | * areas can be scarce. Destroy all free chunks except for one. |
1120 | */ |
1121 | mutex_lock(&pcpu_alloc_mutex); |
1122 | spin_lock_irq(&pcpu_lock); |
1123 | |
1124 | list_for_each_entry_safe(chunk, next, free_head, list) { |
1125 | WARN_ON(chunk->immutable); |
1126 | |
1127 | /* spare the first one */ |
1128 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) |
1129 | continue; |
1130 | |
1131 | list_del_init(&chunk->map_extend_list); |
1132 | list_move(&chunk->list, &to_free); |
1133 | } |
1134 | |
1135 | spin_unlock_irq(&pcpu_lock); |
1136 | |
1137 | list_for_each_entry_safe(chunk, next, &to_free, list) { |
1138 | int rs, re; |
1139 | |
1140 | pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) { |
1141 | pcpu_depopulate_chunk(chunk, rs, re); |
1142 | spin_lock_irq(&pcpu_lock); |
1143 | pcpu_chunk_depopulated(chunk, rs, re); |
1144 | spin_unlock_irq(&pcpu_lock); |
1145 | } |
1146 | pcpu_destroy_chunk(chunk); |
1147 | } |
1148 | |
1149 | /* service chunks which requested async area map extension */ |
1150 | do { |
1151 | int new_alloc = 0; |
1152 | |
1153 | spin_lock_irq(&pcpu_lock); |
1154 | |
1155 | chunk = list_first_entry_or_null(&pcpu_map_extend_chunks, |
1156 | struct pcpu_chunk, map_extend_list); |
1157 | if (chunk) { |
1158 | list_del_init(&chunk->map_extend_list); |
1159 | new_alloc = pcpu_need_to_extend(chunk, false); |
1160 | } |
1161 | |
1162 | spin_unlock_irq(&pcpu_lock); |
1163 | |
1164 | if (new_alloc) |
1165 | pcpu_extend_area_map(chunk, new_alloc); |
1166 | } while (chunk); |
1167 | |
1168 |  * Ensure there are a certain number of free populated pages for |
1169 | * Ensure there are certain number of free populated pages for |
1170 | * atomic allocs. Fill up from the most packed so that atomic |
1171 | * allocs don't increase fragmentation. If atomic allocation |
1172 | * failed previously, always populate the maximum amount. This |
1173 |  * should prevent atomic allocs larger than PAGE_SIZE from failing |
1174 |  * indefinitely; however, large atomic allocs are not |
1175 | * something we support properly and can be highly unreliable and |
1176 | * inefficient. |
1177 | */ |
1178 | retry_pop: |
1179 | if (pcpu_atomic_alloc_failed) { |
1180 | nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; |
1181 | /* best effort anyway, don't worry about synchronization */ |
1182 | pcpu_atomic_alloc_failed = false; |
1183 | } else { |
1184 | nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - |
1185 | pcpu_nr_empty_pop_pages, |
1186 | 0, PCPU_EMPTY_POP_PAGES_HIGH); |
1187 | } |
1188 | |
1189 | for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { |
1190 | int nr_unpop = 0, rs, re; |
1191 | |
1192 | if (!nr_to_pop) |
1193 | break; |
1194 | |
1195 | spin_lock_irq(&pcpu_lock); |
1196 | list_for_each_entry(chunk, &pcpu_slot[slot], list) { |
1197 | nr_unpop = pcpu_unit_pages - chunk->nr_populated; |
1198 | if (nr_unpop) |
1199 | break; |
1200 | } |
1201 | spin_unlock_irq(&pcpu_lock); |
1202 | |
1203 | if (!nr_unpop) |
1204 | continue; |
1205 | |
1206 | /* @chunk can't go away while pcpu_alloc_mutex is held */ |
1207 | pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) { |
1208 | int nr = min(re - rs, nr_to_pop); |
1209 | |
1210 | ret = pcpu_populate_chunk(chunk, rs, rs + nr); |
1211 | if (!ret) { |
1212 | nr_to_pop -= nr; |
1213 | spin_lock_irq(&pcpu_lock); |
1214 | pcpu_chunk_populated(chunk, rs, rs + nr); |
1215 | spin_unlock_irq(&pcpu_lock); |
1216 | } else { |
1217 | nr_to_pop = 0; |
1218 | } |
1219 | |
1220 | if (!nr_to_pop) |
1221 | break; |
1222 | } |
1223 | } |
1224 | |
1225 | if (nr_to_pop) { |
1226 | /* ran out of chunks to populate, create a new one and retry */ |
1227 | chunk = pcpu_create_chunk(); |
1228 | if (chunk) { |
1229 | spin_lock_irq(&pcpu_lock); |
1230 | pcpu_chunk_relocate(chunk, -1); |
1231 | spin_unlock_irq(&pcpu_lock); |
1232 | goto retry_pop; |
1233 | } |
1234 | } |
1235 | |
1236 | mutex_unlock(&pcpu_alloc_mutex); |
1237 | } |
1238 | |
1239 | /** |
1240 | * free_percpu - free percpu area |
1241 | * @ptr: pointer to area to free |
1242 | * |
1243 | * Free percpu area @ptr. |
1244 | * |
1245 | * CONTEXT: |
1246 | * Can be called from atomic context. |
1247 | */ |
1248 | void free_percpu(void __percpu *ptr) |
1249 | { |
1250 | void *addr; |
1251 | struct pcpu_chunk *chunk; |
1252 | unsigned long flags; |
1253 | int off, occ_pages; |
1254 | |
1255 | if (!ptr) |
1256 | return; |
1257 | |
1258 | kmemleak_free_percpu(ptr); |
1259 | |
1260 | addr = __pcpu_ptr_to_addr(ptr); |
1261 | |
1262 | spin_lock_irqsave(&pcpu_lock, flags); |
1263 | |
1264 | chunk = pcpu_chunk_addr_search(addr); |
1265 | off = addr - chunk->base_addr; |
1266 | |
1267 | pcpu_free_area(chunk, off, &occ_pages); |
1268 | |
1269 | if (chunk != pcpu_reserved_chunk) |
1270 | pcpu_nr_empty_pop_pages += occ_pages; |
1271 | |
1272 | /* if there is more than one fully free chunk, wake up the grim reaper */ |
1273 | if (chunk->free_size == pcpu_unit_size) { |
1274 | struct pcpu_chunk *pos; |
1275 | |
1276 | list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) |
1277 | if (pos != chunk) { |
1278 | pcpu_schedule_balance_work(); |
1279 | break; |
1280 | } |
1281 | } |
1282 | |
1283 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1284 | } |
1285 | EXPORT_SYMBOL_GPL(free_percpu); |
1286 | |
1287 | /** |
1288 | * is_kernel_percpu_address - test whether address is from static percpu area |
1289 | * @addr: address to test |
1290 | * |
1291 | * Test whether @addr belongs to in-kernel static percpu area. Module |
1292 | * static percpu areas are not considered. For those, use |
1293 | * is_module_percpu_address(). |
1294 | * |
1295 | * RETURNS: |
1296 | * %true if @addr is from in-kernel static percpu area, %false otherwise. |
1297 | */ |
1298 | bool is_kernel_percpu_address(unsigned long addr) |
1299 | { |
1300 | #ifdef CONFIG_SMP |
1301 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
1302 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
1303 | unsigned int cpu; |
1304 | |
1305 | for_each_possible_cpu(cpu) { |
1306 | void *start = per_cpu_ptr(base, cpu); |
1307 | |
1308 | if ((void *)addr >= start && (void *)addr < start + static_size) |
1309 | return true; |
1310 | } |
1311 | #endif |
1312 | /* on UP, can't distinguish from other static vars, always false */ |
1313 | return false; |
1314 | } |
1315 | |
1316 | /** |
1317 | * per_cpu_ptr_to_phys - convert translated percpu address to physical address |
1318 | * @addr: the address to be converted to physical address |
1319 | * |
1320 | * Given @addr which is dereferenceable address obtained via one of |
1321 | * percpu access macros, this function translates it into its physical |
1322 | * address. The caller is responsible for ensuring @addr stays valid |
1323 | * until this function finishes. |
1324 | * |
1325 | * percpu allocator has special setup for the first chunk, which currently |
1326 | * supports either embedding in linear address space or vmalloc mapping, |
1327 | * and, from the second one, the backing allocator (currently either vm or |
1328 | * km) provides translation. |
1329 | * |
1330 | * The addr can be translated simply without checking if it falls into the |
1331 |  * first chunk.  But the current code better reflects how the percpu |
1332 |  * allocator actually works, and the verification can discover bugs both |
1333 |  * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers. |
1334 |  * So we keep the current code. |
1335 | * |
1336 | * RETURNS: |
1337 | * The physical address for @addr. |
1338 | */ |
1339 | phys_addr_t per_cpu_ptr_to_phys(void *addr) |
1340 | { |
1341 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
1342 | bool in_first_chunk = false; |
1343 | unsigned long first_low, first_high; |
1344 | unsigned int cpu; |
1345 | |
1346 | /* |
1347 | * The following test on unit_low/high isn't strictly |
1348 | * necessary but will speed up lookups of addresses which |
1349 | * aren't in the first chunk. |
1350 | */ |
1351 | first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0); |
1352 | first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu, |
1353 | pcpu_unit_pages); |
1354 | if ((unsigned long)addr >= first_low && |
1355 | (unsigned long)addr < first_high) { |
1356 | for_each_possible_cpu(cpu) { |
1357 | void *start = per_cpu_ptr(base, cpu); |
1358 | |
1359 | if (addr >= start && addr < start + pcpu_unit_size) { |
1360 | in_first_chunk = true; |
1361 | break; |
1362 | } |
1363 | } |
1364 | } |
1365 | |
1366 | if (in_first_chunk) { |
1367 | if (!is_vmalloc_addr(addr)) |
1368 | return __pa(addr); |
1369 | else |
1370 | return page_to_phys(vmalloc_to_page(addr)) + |
1371 | offset_in_page(addr); |
1372 | } else |
1373 | return page_to_phys(pcpu_addr_to_page(addr)) + |
1374 | offset_in_page(addr); |
1375 | } |
1376 | |
1377 | /** |
1378 | * pcpu_alloc_alloc_info - allocate percpu allocation info |
1379 | * @nr_groups: the number of groups |
1380 | * @nr_units: the number of units |
1381 | * |
1382 | * Allocate ai which is large enough for @nr_groups groups containing |
1383 | * @nr_units units. The returned ai's groups[0].cpu_map points to the |
1384 | * cpu_map array which is long enough for @nr_units and filled with |
1385 |  * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map |
1386 |  * pointers of the other groups. |
1387 | * |
1388 | * RETURNS: |
1389 | * Pointer to the allocated pcpu_alloc_info on success, NULL on |
1390 | * failure. |
1391 | */ |
1392 | struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, |
1393 | int nr_units) |
1394 | { |
1395 | struct pcpu_alloc_info *ai; |
1396 | size_t base_size, ai_size; |
1397 | void *ptr; |
1398 | int unit; |
1399 | |
1400 | base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), |
1401 | __alignof__(ai->groups[0].cpu_map[0])); |
1402 | ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); |
1403 | |
1404 | ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); |
1405 | if (!ptr) |
1406 | return NULL; |
1407 | ai = ptr; |
1408 | ptr += base_size; |
1409 | |
1410 | ai->groups[0].cpu_map = ptr; |
1411 | |
1412 | for (unit = 0; unit < nr_units; unit++) |
1413 | ai->groups[0].cpu_map[unit] = NR_CPUS; |
1414 | |
1415 | ai->nr_groups = nr_groups; |
1416 | ai->__ai_size = PFN_ALIGN(ai_size); |
1417 | |
1418 | return ai; |
1419 | } |
1420 | |
1421 | /** |
1422 | * pcpu_free_alloc_info - free percpu allocation info |
1423 | * @ai: pcpu_alloc_info to free |
1424 | * |
1425 | * Free @ai which was allocated by pcpu_alloc_alloc_info(). |
1426 | */ |
1427 | void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) |
1428 | { |
1429 | memblock_free_early(__pa(ai), ai->__ai_size); |
1430 | } |
1431 | |
1432 | /** |
1433 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info |
1434 | * @lvl: loglevel |
1435 | * @ai: allocation info to dump |
1436 | * |
1437 | * Print out information about @ai using loglevel @lvl. |
1438 | */ |
1439 | static void pcpu_dump_alloc_info(const char *lvl, |
1440 | const struct pcpu_alloc_info *ai) |
1441 | { |
1442 | int group_width = 1, cpu_width = 1, width; |
1443 | char empty_str[] = "--------"; |
1444 | int alloc = 0, alloc_end = 0; |
1445 | int group, v; |
1446 | int upa, apl; /* units per alloc, allocs per line */ |
1447 | |
1448 | v = ai->nr_groups; |
1449 | while (v /= 10) |
1450 | group_width++; |
1451 | |
1452 | v = num_possible_cpus(); |
1453 | while (v /= 10) |
1454 | cpu_width++; |
1455 | empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; |
1456 | |
1457 | upa = ai->alloc_size / ai->unit_size; |
1458 | width = upa * (cpu_width + 1) + group_width + 3; |
1459 | apl = rounddown_pow_of_two(max(60 / width, 1)); |
1460 | |
1461 | printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", |
1462 | lvl, ai->static_size, ai->reserved_size, ai->dyn_size, |
1463 | ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); |
1464 | |
1465 | for (group = 0; group < ai->nr_groups; group++) { |
1466 | const struct pcpu_group_info *gi = &ai->groups[group]; |
1467 | int unit = 0, unit_end = 0; |
1468 | |
1469 | BUG_ON(gi->nr_units % upa); |
1470 | for (alloc_end += gi->nr_units / upa; |
1471 | alloc < alloc_end; alloc++) { |
1472 | if (!(alloc % apl)) { |
1473 | pr_cont("\n"); |
1474 | printk("%spcpu-alloc: ", lvl); |
1475 | } |
1476 | pr_cont("[%0*d] ", group_width, group); |
1477 | |
1478 | for (unit_end += upa; unit < unit_end; unit++) |
1479 | if (gi->cpu_map[unit] != NR_CPUS) |
1480 | pr_cont("%0*d ", |
1481 | cpu_width, gi->cpu_map[unit]); |
1482 | else |
1483 | pr_cont("%s ", empty_str); |
1484 | } |
1485 | } |
1486 | pr_cont("\n"); |
1487 | } |
1488 | |
1489 | /** |
1490 | * pcpu_setup_first_chunk - initialize the first percpu chunk |
1491 |  * @ai: pcpu_alloc_info describing how the percpu area is shaped |
1492 | * @base_addr: mapped address |
1493 | * |
1494 | * Initialize the first percpu chunk which contains the kernel static |
1495 |  * percpu area.  This function is to be called from arch percpu area |
1496 | * setup path. |
1497 | * |
1498 | * @ai contains all information necessary to initialize the first |
1499 | * chunk and prime the dynamic percpu allocator. |
1500 | * |
1501 | * @ai->static_size is the size of static percpu area. |
1502 | * |
1503 | * @ai->reserved_size, if non-zero, specifies the amount of bytes to |
1504 | * reserve after the static area in the first chunk. This reserves |
1505 | * the first chunk such that it's available only through reserved |
1506 | * percpu allocation. This is primarily used to serve module percpu |
1507 | * static areas on architectures where the addressing model has |
1508 | * limited offset range for symbol relocations to guarantee module |
1509 | * percpu symbols fall inside the relocatable range. |
1510 | * |
1511 | * @ai->dyn_size determines the number of bytes available for dynamic |
1512 | * allocation in the first chunk. The area between @ai->static_size + |
1513 | * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. |
1514 | * |
1515 | * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE |
1516 | * and equal to or larger than @ai->static_size + @ai->reserved_size + |
1517 | * @ai->dyn_size. |
1518 | * |
1519 | * @ai->atom_size is the allocation atom size and used as alignment |
1520 | * for vm areas. |
1521 | * |
1522 | * @ai->alloc_size is the allocation size and always multiple of |
1523 | * @ai->atom_size. This is larger than @ai->atom_size if |
1524 | * @ai->unit_size is larger than @ai->atom_size. |
1525 | * |
1526 | * @ai->nr_groups and @ai->groups describe virtual memory layout of |
1527 | * percpu areas. Units which should be colocated are put into the |
1528 | * same group. Dynamic VM areas will be allocated according to these |
1529 | * groupings. If @ai->nr_groups is zero, a single group containing |
1530 | * all units is assumed. |
1531 | * |
1532 | * The caller should have mapped the first chunk at @base_addr and |
1533 | * copied static data to each unit. |
1534 | * |
1535 | * If the first chunk ends up with both reserved and dynamic areas, it |
1536 | * is served by two chunks - one to serve the core static and reserved |
1537 | * areas and the other for the dynamic area. They share the same vm |
1538 |  * and page map but use different area allocation maps to stay away |
1539 |  * from each other.  The latter chunk is circulated in the chunk slots |
1540 |  * and available for dynamic allocation like any other chunk. |
1541 | * |
1542 | * RETURNS: |
1543 | * 0 on success, -errno on failure. |
1544 | */ |
1545 | int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, |
1546 | void *base_addr) |
1547 | { |
1548 | static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; |
1549 | static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; |
1550 | size_t dyn_size = ai->dyn_size; |
1551 | size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; |
1552 | struct pcpu_chunk *schunk, *dchunk = NULL; |
1553 | unsigned long *group_offsets; |
1554 | size_t *group_sizes; |
1555 | unsigned long *unit_off; |
1556 | unsigned int cpu; |
1557 | int *unit_map; |
1558 | int group, unit, i; |
1559 | |
1560 | #define PCPU_SETUP_BUG_ON(cond) do { \ |
1561 | if (unlikely(cond)) { \ |
1562 | pr_emerg("failed to initialize, %s\n", #cond); \ |
1563 | pr_emerg("cpu_possible_mask=%*pb\n", \ |
1564 | cpumask_pr_args(cpu_possible_mask)); \ |
1565 | pcpu_dump_alloc_info(KERN_EMERG, ai); \ |
1566 | BUG(); \ |
1567 | } \ |
1568 | } while (0) |
1569 | |
1570 | /* sanity checks */ |
1571 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); |
1572 | #ifdef CONFIG_SMP |
1573 | PCPU_SETUP_BUG_ON(!ai->static_size); |
1574 | PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); |
1575 | #endif |
1576 | PCPU_SETUP_BUG_ON(!base_addr); |
1577 | PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); |
1578 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); |
1579 | PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); |
1580 | PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); |
1581 | PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); |
1582 | PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); |
1583 | |
1584 | /* process group information and build config tables accordingly */ |
1585 | group_offsets = memblock_virt_alloc(ai->nr_groups * |
1586 | sizeof(group_offsets[0]), 0); |
1587 | group_sizes = memblock_virt_alloc(ai->nr_groups * |
1588 | sizeof(group_sizes[0]), 0); |
1589 | unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); |
1590 | unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); |
1591 | |
1592 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
1593 | unit_map[cpu] = UINT_MAX; |
1594 | |
1595 | pcpu_low_unit_cpu = NR_CPUS; |
1596 | pcpu_high_unit_cpu = NR_CPUS; |
1597 | |
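/*
 * Walk every group's cpu_map and record, for each possible cpu, its
 * global unit index (unit_map) and its byte offset from base_addr
 * (unit_off).  cpu_map entries equal to NR_CPUS denote unused units
 * and are skipped.
 */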
1598 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { |
1599 | const struct pcpu_group_info *gi = &ai->groups[group]; |
1600 | |
1601 | group_offsets[group] = gi->base_offset; |
1602 | group_sizes[group] = gi->nr_units * ai->unit_size; |
1603 | |
1604 | for (i = 0; i < gi->nr_units; i++) { |
1605 | cpu = gi->cpu_map[i]; |
1606 | if (cpu == NR_CPUS) |
1607 | continue; |
1608 | |
1609 | PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); |
1610 | PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); |
1611 | PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); |
1612 | |
1613 | unit_map[cpu] = unit + i; |
1614 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; |
1615 | |
1616 | /* determine low/high unit_cpu */ |
1617 | if (pcpu_low_unit_cpu == NR_CPUS || |
1618 | unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) |
1619 | pcpu_low_unit_cpu = cpu; |
1620 | if (pcpu_high_unit_cpu == NR_CPUS || |
1621 | unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) |
1622 | pcpu_high_unit_cpu = cpu; |
1623 | } |
1624 | } |
1625 | pcpu_nr_units = unit; |
1626 | |
1627 | for_each_possible_cpu(cpu) |
1628 | PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); |
1629 | |
1630 | /* we're done parsing the input, undefine BUG macro and dump config */ |
1631 | #undef PCPU_SETUP_BUG_ON |
1632 | pcpu_dump_alloc_info(KERN_DEBUG, ai); |
1633 | |
1634 | pcpu_nr_groups = ai->nr_groups; |
1635 | pcpu_group_offsets = group_offsets; |
1636 | pcpu_group_sizes = group_sizes; |
1637 | pcpu_unit_map = unit_map; |
1638 | pcpu_unit_offsets = unit_off; |
1639 | |
1640 | /* determine basic parameters */ |
1641 | pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; |
1642 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; |
1643 | pcpu_atom_size = ai->atom_size; |
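/*
 * struct pcpu_chunk is trailed by its populated page bitmap (one bit
 * per unit page); include it in the per-chunk struct size.
 */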
1644 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + |
1645 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); |
1646 | |
1647 | /* |
1648 | * Allocate chunk slots. The additional last slot is for |
1649 | * empty chunks. |
1650 | */ |
1651 | pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; |
1652 | pcpu_slot = memblock_virt_alloc( |
1653 | pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); |
1654 | for (i = 0; i < pcpu_nr_slots; i++) |
1655 | INIT_LIST_HEAD(&pcpu_slot[i]); |
1656 | |
1657 | /* |
1658 | * Initialize static chunk. If reserved_size is zero, the |
1659 | * static chunk covers static area + dynamic allocation area |
1660 | * in the first chunk. If reserved_size is not zero, it |
1661 | * covers static area + reserved area (mostly used for module |
1662 | * static percpu allocation). |
1663 | */ |
1664 | schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
1665 | INIT_LIST_HEAD(&schunk->list); |
1666 | INIT_LIST_HEAD(&schunk->map_extend_list); |
1667 | schunk->base_addr = base_addr; |
1668 | schunk->map = smap; |
1669 | schunk->map_alloc = ARRAY_SIZE(smap); |
1670 | schunk->immutable = true; |
1671 | bitmap_fill(schunk->populated, pcpu_unit_pages); |
1672 | schunk->nr_populated = pcpu_unit_pages; |
1673 | |
1674 | if (ai->reserved_size) { |
1675 | schunk->free_size = ai->reserved_size; |
1676 | pcpu_reserved_chunk = schunk; |
1677 | pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; |
1678 | } else { |
1679 | schunk->free_size = dyn_size; |
1680 | dyn_size = 0; /* dynamic area covered */ |
1681 | } |
1682 | schunk->contig_hint = schunk->free_size; |
1683 | |
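/*
 * map[] entries are byte offsets of area boundaries; an entry with
 * bit 0 set starts an in-use area.  Mark [0, static_size) as used,
 * leave the following free_size bytes free and terminate the map
 * with an in-use sentinel at the end of the covered region.
 */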
1684 | schunk->map[0] = 1; |
1685 | schunk->map[1] = ai->static_size; |
1686 | schunk->map_used = 1; |
1687 | if (schunk->free_size) |
1688 | schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size; |
1689 | schunk->map[schunk->map_used] |= 1; |
1690 | |
1691 | /* init dynamic chunk if necessary */ |
1692 | if (dyn_size) { |
1693 | dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
1694 | INIT_LIST_HEAD(&dchunk->list); |
1695 | INIT_LIST_HEAD(&dchunk->map_extend_list); |
1696 | dchunk->base_addr = base_addr; |
1697 | dchunk->map = dmap; |
1698 | dchunk->map_alloc = ARRAY_SIZE(dmap); |
1699 | dchunk->immutable = true; |
1700 | bitmap_fill(dchunk->populated, pcpu_unit_pages); |
1701 | dchunk->nr_populated = pcpu_unit_pages; |
1702 | |
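/*
 * The dynamic chunk shares the unit with the static/reserved chunk;
 * pre-mark [0, pcpu_reserved_chunk_limit) as in use so that only the
 * trailing dynamic area is ever served from here.
 */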
1703 | dchunk->contig_hint = dchunk->free_size = dyn_size; |
1704 | dchunk->map[0] = 1; |
1705 | dchunk->map[1] = pcpu_reserved_chunk_limit; |
1706 | dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1; |
1707 | dchunk->map_used = 2; |
1708 | } |
1709 | |
1710 | /* link the first chunk in */ |
1711 | pcpu_first_chunk = dchunk ?: schunk; |
1712 | pcpu_nr_empty_pop_pages += |
1713 | pcpu_count_occupied_pages(pcpu_first_chunk, 1); |
1714 | pcpu_chunk_relocate(pcpu_first_chunk, -1); |
1715 | |
1716 | /* we're done */ |
1717 | pcpu_base_addr = base_addr; |
1718 | return 0; |
1719 | } |
1720 | |
1721 | #ifdef CONFIG_SMP |
1722 | |
1723 | const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { |
1724 | [PCPU_FC_AUTO] = "auto", |
1725 | [PCPU_FC_EMBED] = "embed", |
1726 | [PCPU_FC_PAGE] = "page", |
1727 | }; |
1728 | |
1729 | enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; |
1730 | |
1731 | static int __init percpu_alloc_setup(char *str) |
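/*
 * Parse the "percpu_alloc=" early boot parameter.  For example, booting
 * with "percpu_alloc=page" selects the page-based first chunk allocator
 * when it is built in; unknown values only trigger a warning.
 */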
1732 | { |
1733 | if (!str) |
1734 | return -EINVAL; |
1735 | |
1736 | if (0) |
1737 | /* nada */; |
1738 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK |
1739 | else if (!strcmp(str, "embed")) |
1740 | pcpu_chosen_fc = PCPU_FC_EMBED; |
1741 | #endif |
1742 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
1743 | else if (!strcmp(str, "page")) |
1744 | pcpu_chosen_fc = PCPU_FC_PAGE; |
1745 | #endif |
1746 | else |
1747 | pr_warn("unknown allocator %s specified\n", str); |
1748 | |
1749 | return 0; |
1750 | } |
1751 | early_param("percpu_alloc", percpu_alloc_setup); |
1752 | |
1753 | /* |
1754 | * pcpu_embed_first_chunk() is used by the generic percpu setup. |
1755 | * Build it if needed by the arch config or the generic setup is going |
1756 | * to be used. |
1757 | */ |
1758 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
1759 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) |
1760 | #define BUILD_EMBED_FIRST_CHUNK |
1761 | #endif |
1762 | |
1763 | /* build pcpu_page_first_chunk() iff needed by the arch config */ |
1764 | #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) |
1765 | #define BUILD_PAGE_FIRST_CHUNK |
1766 | #endif |
1767 | |
1768 | /* pcpu_build_alloc_info() is used by both embed and page first chunk */ |
1769 | #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) |
1770 | /** |
1771 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs |
1772 | * @reserved_size: the size of reserved percpu area in bytes |
1773 | * @dyn_size: minimum free size for dynamic allocation in bytes |
1774 | * @atom_size: allocation atom size |
1775 | * @cpu_distance_fn: callback to determine distance between cpus, optional |
1776 | * |
1777 | * This function determines grouping of units, their mappings to cpus |
1778 | * and other parameters considering needed percpu size, allocation |
1779 | * atom size and distances between CPUs. |
1780 | * |
1781 | * Groups are always multiples of atom size, and CPUs which are within |
1782 | * LOCAL_DISTANCE of each other in both directions are grouped together |
1783 | * and share space for units in the same group. The returned |
1784 | * configuration is guaranteed to have CPUs on different nodes in |
1785 | * different groups and >=75% usage of the allocated virtual address space. |
1786 | * |
1787 | * RETURNS: |
1788 | * On success, pointer to the new allocation_info is returned. On |
1789 | * failure, ERR_PTR value is returned. |
1790 | */ |
1791 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( |
1792 | size_t reserved_size, size_t dyn_size, |
1793 | size_t atom_size, |
1794 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) |
1795 | { |
1796 | static int group_map[NR_CPUS] __initdata; |
1797 | static int group_cnt[NR_CPUS] __initdata; |
1798 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
1799 | int nr_groups = 1, nr_units = 0; |
1800 | size_t size_sum, min_unit_size, alloc_size; |
1801 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ |
1802 | int last_allocs, group, unit; |
1803 | unsigned int cpu, tcpu; |
1804 | struct pcpu_alloc_info *ai; |
1805 | unsigned int *cpu_map; |
1806 | |
1807 | /* this function may be called multiple times */ |
1808 | memset(group_map, 0, sizeof(group_map)); |
1809 | memset(group_cnt, 0, sizeof(group_cnt)); |
1810 | |
1811 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ |
1812 | size_sum = PFN_ALIGN(static_size + reserved_size + |
1813 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); |
1814 | dyn_size = size_sum - static_size - reserved_size; |
1815 | |
1816 | /* |
1817 | * Determine min_unit_size, alloc_size and max_upa such that |
1818 | * alloc_size is multiple of atom_size and is the smallest |
1819 | * which can accommodate 4k aligned segments which are equal to |
1820 | * or larger than min_unit_size. |
1821 | */ |
1822 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); |
1823 | |
1824 | alloc_size = roundup(min_unit_size, atom_size); |
1825 | upa = alloc_size / min_unit_size; |
1826 | while (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
1827 | upa--; |
1828 | max_upa = upa; |
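/*
 * Illustrative numbers only (not from any particular machine): with a
 * 2MB atom_size and a page-aligned min_unit_size of 44KB, alloc_size
 * is rounded up to 2MB and upa starts at 46; it is then walked down to
 * the first value that divides alloc_size into page-aligned units,
 * i.e. max_upa = 32 with 64KB units.
 */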
1829 | |
1830 | /* group cpus according to their proximity */ |
1831 | for_each_possible_cpu(cpu) { |
1832 | group = 0; |
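/* whenever a cpu already placed in this group turns out to be too far away, advance to the next group and rescan */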
1833 | next_group: |
1834 | for_each_possible_cpu(tcpu) { |
1835 | if (cpu == tcpu) |
1836 | break; |
1837 | if (group_map[tcpu] == group && cpu_distance_fn && |
1838 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || |
1839 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { |
1840 | group++; |
1841 | nr_groups = max(nr_groups, group + 1); |
1842 | goto next_group; |
1843 | } |
1844 | } |
1845 | group_map[cpu] = group; |
1846 | group_cnt[group]++; |
1847 | } |
1848 | |
1849 | /* |
1850 | * Expand unit size until address space usage goes over 75% |
1851 | * and then as much as possible without using more address |
1852 | * space. |
1853 | */ |
1854 | last_allocs = INT_MAX; |
1855 | for (upa = max_upa; upa; upa--) { |
1856 | int allocs = 0, wasted = 0; |
1857 | |
1858 | if (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
1859 | continue; |
1860 | |
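/*
 * Each group needs ceil(group_cnt / upa) allocations; units in the
 * last allocation that don't map to any cpu count as waste.
 */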
1861 | for (group = 0; group < nr_groups; group++) { |
1862 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); |
1863 | allocs += this_allocs; |
1864 | wasted += this_allocs * upa - group_cnt[group]; |
1865 | } |
1866 | |
1867 | /* |
1868 | * Don't accept if wastage is over 1/3. The |
1869 | * greater-than comparison ensures upa==1 always |
1870 | * passes the following check. |
1871 | */ |
1872 | if (wasted > num_possible_cpus() / 3) |
1873 | continue; |
1874 | |
1875 | /* and then don't consume more memory */ |
1876 | if (allocs > last_allocs) |
1877 | break; |
1878 | last_allocs = allocs; |
1879 | best_upa = upa; |
1880 | } |
1881 | upa = best_upa; |
1882 | |
1883 | /* allocate and fill alloc_info */ |
1884 | for (group = 0; group < nr_groups; group++) |
1885 | nr_units += roundup(group_cnt[group], upa); |
1886 | |
1887 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); |
1888 | if (!ai) |
1889 | return ERR_PTR(-ENOMEM); |
1890 | cpu_map = ai->groups[0].cpu_map; |
1891 | |
1892 | for (group = 0; group < nr_groups; group++) { |
1893 | ai->groups[group].cpu_map = cpu_map; |
1894 | cpu_map += roundup(group_cnt[group], upa); |
1895 | } |
1896 | |
1897 | ai->static_size = static_size; |
1898 | ai->reserved_size = reserved_size; |
1899 | ai->dyn_size = dyn_size; |
1900 | ai->unit_size = alloc_size / upa; |
1901 | ai->atom_size = atom_size; |
1902 | ai->alloc_size = alloc_size; |
1903 | |
1904 | for (group = 0, unit = 0; group_cnt[group]; group++) { |
1905 | struct pcpu_group_info *gi = &ai->groups[group]; |
1906 | |
1907 | /* |
1908 | * Initialize base_offset as if all groups are located |
1909 | * back-to-back. The caller should update this to |
1910 | * reflect actual allocation. |
1911 | */ |
1912 | gi->base_offset = unit * ai->unit_size; |
1913 | |
1914 | for_each_possible_cpu(cpu) |
1915 | if (group_map[cpu] == group) |
1916 | gi->cpu_map[gi->nr_units++] = cpu; |
1917 | gi->nr_units = roundup(gi->nr_units, upa); |
1918 | unit += gi->nr_units; |
1919 | } |
1920 | BUG_ON(unit != nr_units); |
1921 | |
1922 | return ai; |
1923 | } |
1924 | #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ |
1925 | |
1926 | #if defined(BUILD_EMBED_FIRST_CHUNK) |
1927 | /** |
1928 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem |
1929 | * @reserved_size: the size of reserved percpu area in bytes |
1930 | * @dyn_size: minimum free size for dynamic allocation in bytes |
1931 | * @atom_size: allocation atom size |
1932 | * @cpu_distance_fn: callback to determine distance between cpus, optional |
1933 | * @alloc_fn: function to allocate percpu page |
1934 | * @free_fn: function to free percpu page |
1935 | * |
1936 | * This is a helper to ease setting up embedded first percpu chunk and |
1937 | * can be called where pcpu_setup_first_chunk() is expected. |
1938 | * |
1939 | * If this function is used to setup the first chunk, it is allocated |
1940 | * by calling @alloc_fn and used as-is without being mapped into |
1941 | * vmalloc area. Allocations are always whole multiples of @atom_size |
1942 | * aligned to @atom_size. |
1943 | * |
1944 | * This enables the first chunk to piggy back on the linear physical |
1945 | * mapping which often uses larger page size. Please note that this |
1946 | * can result in very sparse cpu->unit mapping on NUMA machines thus |
1947 | * requiring large vmalloc address space. Don't use this allocator if |
1948 | * vmalloc space is not orders of magnitude larger than distances |
1949 | * between node memory addresses (ie. 32bit NUMA machines). |
1950 | * |
1951 | * @dyn_size specifies the minimum dynamic area size. |
1952 | * |
1953 | * If the needed size is smaller than the minimum or specified unit |
1954 | * size, the leftover is returned using @free_fn. |
1955 | * |
1956 | * RETURNS: |
1957 | * 0 on success, -errno on failure. |
1958 | */ |
1959 | int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, |
1960 | size_t atom_size, |
1961 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, |
1962 | pcpu_fc_alloc_fn_t alloc_fn, |
1963 | pcpu_fc_free_fn_t free_fn) |
1964 | { |
1965 | void *base = (void *)ULONG_MAX; |
1966 | void **areas = NULL; |
1967 | struct pcpu_alloc_info *ai; |
1968 | size_t size_sum, areas_size; |
1969 | unsigned long max_distance; |
1970 | int group, i, highest_group, rc; |
1971 | |
1972 | ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, |
1973 | cpu_distance_fn); |
1974 | if (IS_ERR(ai)) |
1975 | return PTR_ERR(ai); |
1976 | |
1977 | size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
1978 | areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); |
1979 | |
1980 | areas = memblock_virt_alloc_nopanic(areas_size, 0); |
1981 | if (!areas) { |
1982 | rc = -ENOMEM; |
1983 | goto out_free; |
1984 | } |
1985 | |
1986 | /* allocate, copy and determine base address & max_distance */ |
1987 | highest_group = 0; |
1988 | for (group = 0; group < ai->nr_groups; group++) { |
1989 | struct pcpu_group_info *gi = &ai->groups[group]; |
1990 | unsigned int cpu = NR_CPUS; |
1991 | void *ptr; |
1992 | |
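/*
 * Pick any cpu mapped into this group; @alloc_fn allocates the whole
 * group's memory for that cpu (typically node-local to it).
 */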
1993 | for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) |
1994 | cpu = gi->cpu_map[i]; |
1995 | BUG_ON(cpu == NR_CPUS); |
1996 | |
1997 | /* allocate space for the whole group */ |
1998 | ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); |
1999 | if (!ptr) { |
2000 | rc = -ENOMEM; |
2001 | goto out_free_areas; |
2002 | } |
2003 | /* kmemleak tracks the percpu allocations separately */ |
2004 | kmemleak_free(ptr); |
2005 | areas[group] = ptr; |
2006 | |
2007 | base = min(ptr, base); |
2008 | if (ptr > areas[highest_group]) |
2009 | highest_group = group; |
2010 | } |
2011 | max_distance = areas[highest_group] - base; |
2012 | max_distance += ai->unit_size * ai->groups[highest_group].nr_units; |
2013 | |
2014 | /* warn if maximum distance is further than 75% of vmalloc space */ |
2015 | if (max_distance > VMALLOC_TOTAL * 3 / 4) { |
2016 | pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", |
2017 | max_distance, VMALLOC_TOTAL); |
2018 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
2019 | /* and fail if we have fallback */ |
2020 | rc = -EINVAL; |
2021 | goto out_free_areas; |
2022 | #endif |
2023 | } |
2024 | |
2025 | /* |
2026 | * Copy data and free unused parts. This should happen after all |
2027 | * allocations are complete; otherwise, we may end up with |
2028 | * overlapping groups. |
2029 | */ |
2030 | for (group = 0; group < ai->nr_groups; group++) { |
2031 | struct pcpu_group_info *gi = &ai->groups[group]; |
2032 | void *ptr = areas[group]; |
2033 | |
2034 | for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { |
2035 | if (gi->cpu_map[i] == NR_CPUS) { |
2036 | /* unused unit, free whole */ |
2037 | free_fn(ptr, ai->unit_size); |
2038 | continue; |
2039 | } |
2040 | /* copy and return the unused part */ |
2041 | memcpy(ptr, __per_cpu_load, ai->static_size); |
2042 | free_fn(ptr + size_sum, ai->unit_size - size_sum); |
2043 | } |
2044 | } |
2045 | |
2046 | /* base address is now known, determine group base offsets */ |
2047 | for (group = 0; group < ai->nr_groups; group++) { |
2048 | ai->groups[group].base_offset = areas[group] - base; |
2049 | } |
2050 | |
2051 | pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", |
2052 | PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, |
2053 | ai->dyn_size, ai->unit_size); |
2054 | |
2055 | rc = pcpu_setup_first_chunk(ai, base); |
2056 | goto out_free; |
2057 | |
2058 | out_free_areas: |
2059 | for (group = 0; group < ai->nr_groups; group++) |
2060 | if (areas[group]) |
2061 | free_fn(areas[group], |
2062 | ai->groups[group].nr_units * ai->unit_size); |
2063 | out_free: |
2064 | pcpu_free_alloc_info(ai); |
2065 | if (areas) |
2066 | memblock_free_early(__pa(areas), areas_size); |
2067 | return rc; |
2068 | } |
2069 | #endif /* BUILD_EMBED_FIRST_CHUNK */ |
2070 | |
2071 | #ifdef BUILD_PAGE_FIRST_CHUNK |
2072 | /** |
2073 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
2074 | * @reserved_size: the size of reserved percpu area in bytes |
2075 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE |
2076 | * @free_fn: function to free percpu page, always called with PAGE_SIZE |
2077 | * @populate_pte_fn: function to populate pte |
2078 | * |
2079 | * This is a helper to ease setting up page-remapped first percpu |
2080 | * chunk and can be called where pcpu_setup_first_chunk() is expected. |
2081 | * |
2082 | * This is the basic allocator. Static percpu area is allocated |
2083 | * page-by-page into vmalloc area. |
2084 | * |
2085 | * RETURNS: |
2086 | * 0 on success, -errno on failure. |
2087 | */ |
2088 | int __init pcpu_page_first_chunk(size_t reserved_size, |
2089 | pcpu_fc_alloc_fn_t alloc_fn, |
2090 | pcpu_fc_free_fn_t free_fn, |
2091 | pcpu_fc_populate_pte_fn_t populate_pte_fn) |
2092 | { |
2093 | static struct vm_struct vm; |
2094 | struct pcpu_alloc_info *ai; |
2095 | char psize_str[16]; |
2096 | int unit_pages; |
2097 | size_t pages_size; |
2098 | struct page **pages; |
2099 | int unit, i, j, rc; |
2100 | |
2101 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
2102 | |
2103 | ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); |
2104 | if (IS_ERR(ai)) |
2105 | return PTR_ERR(ai); |
2106 | BUG_ON(ai->nr_groups != 1); |
2107 | BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); |
2108 | |
2109 | unit_pages = ai->unit_size >> PAGE_SHIFT; |
2110 | |
2111 | /* unaligned allocations can't be freed, round up to page size */ |
2112 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * |
2113 | sizeof(pages[0])); |
2114 | pages = memblock_virt_alloc(pages_size, 0); |
2115 | |
2116 | /* allocate pages */ |
2117 | j = 0; |
2118 | for (unit = 0; unit < num_possible_cpus(); unit++) |
2119 | for (i = 0; i < unit_pages; i++) { |
2120 | unsigned int cpu = ai->groups[0].cpu_map[unit]; |
2121 | void *ptr; |
2122 | |
2123 | ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); |
2124 | if (!ptr) { |
2125 | pr_warn("failed to allocate %s page for cpu%u\n", |
2126 | psize_str, cpu); |
2127 | goto enomem; |
2128 | } |
2129 | /* kmemleak tracks the percpu allocations separately */ |
2130 | kmemleak_free(ptr); |
2131 | pages[j++] = virt_to_page(ptr); |
2132 | } |
2133 | |
2134 | /* allocate vm area, map the pages and copy static data */ |
2135 | vm.flags = VM_ALLOC; |
2136 | vm.size = num_possible_cpus() * ai->unit_size; |
2137 | vm_area_register_early(&vm, PAGE_SIZE); |
2138 | |
2139 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
2140 | unsigned long unit_addr = |
2141 | (unsigned long)vm.addr + unit * ai->unit_size; |
2142 | |
2143 | for (i = 0; i < unit_pages; i++) |
2144 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); |
2145 | |
2146 | /* pte already populated, the following shouldn't fail */ |
2147 | rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], |
2148 | unit_pages); |
2149 | if (rc < 0) |
2150 | panic("failed to map percpu area, err=%d\n", rc); |
2151 | |
2152 | /* |
2153 | * FIXME: Archs with virtual cache should flush local |
2154 | * cache for the linear mapping here - something |
2155 | * equivalent to flush_cache_vmap() on the local cpu. |
2156 | * flush_cache_vmap() can't be used as most supporting |
2157 | * data structures are not set up yet. |
2158 | */ |
2159 | |
2160 | /* copy static data */ |
2161 | memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); |
2162 | } |
2163 | |
2164 | /* we're ready, commit */ |
2165 | pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", |
2166 | unit_pages, psize_str, ai->static_size, |
2167 | ai->reserved_size, ai->dyn_size); |
2168 | |
2169 | rc = pcpu_setup_first_chunk(ai, vm.addr); |
2170 | goto out_free_ar; |
2171 | |
2172 | enomem: |
2173 | while (--j >= 0) |
2174 | free_fn(page_address(pages[j]), PAGE_SIZE); |
2175 | rc = -ENOMEM; |
2176 | out_free_ar: |
2177 | memblock_free_early(__pa(pages), pages_size); |
2178 | pcpu_free_alloc_info(ai); |
2179 | return rc; |
2180 | } |
2181 | #endif /* BUILD_PAGE_FIRST_CHUNK */ |
2182 | |
2183 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA |
2184 | /* |
2185 | * Generic SMP percpu area setup. |
2186 | * |
2187 | * The embedding helper is used because its behavior closely resembles |
2188 | * the original non-dynamic generic percpu area setup. This is |
2189 | * important because many archs have addressing restrictions and might |
2190 | * fail if the percpu area is located far away from the previous |
2191 | * location. As an added bonus, in non-NUMA cases, embedding is |
2192 | * generally a good idea TLB-wise because percpu area can piggy back |
2193 | * on the physical linear memory mapping which uses large page |
2194 | * mappings on applicable archs. |
2195 | */ |
2196 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
2197 | EXPORT_SYMBOL(__per_cpu_offset); |
2198 | |
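/*
 * Default first chunk alloc/free callbacks: grab boot memory at or
 * above MAX_DMA_ADDRESS.  @cpu is ignored, so the allocation is not
 * node-affine; NUMA-aware architectures typically provide their own
 * setup_per_cpu_areas() instead.
 */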
2199 | static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, |
2200 | size_t align) |
2201 | { |
2202 | return memblock_virt_alloc_from_nopanic( |
2203 | size, align, __pa(MAX_DMA_ADDRESS)); |
2204 | } |
2205 | |
2206 | static void __init pcpu_dfl_fc_free(void *ptr, size_t size) |
2207 | { |
2208 | memblock_free_early(__pa(ptr), size); |
2209 | } |
2210 | |
2211 | void __init setup_per_cpu_areas(void) |
2212 | { |
2213 | unsigned long delta; |
2214 | unsigned int cpu; |
2215 | int rc; |
2216 | |
2217 | /* |
2218 | * Always reserve area for module percpu variables. That's |
2219 | * what the legacy allocator did. |
2220 | */ |
2221 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
2222 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, |
2223 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); |
2224 | if (rc < 0) |
2225 | panic("Failed to initialize percpu areas."); |
2226 | |
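/*
 * __per_cpu_offset[cpu] translates a static percpu address into the
 * cpu's copy in the first chunk: the distance from the kernel's
 * .data..percpu section to base_addr plus the cpu's unit offset.
 */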
2227 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; |
2228 | for_each_possible_cpu(cpu) |
2229 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
2230 | } |
2231 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
2232 | |
2233 | #else /* CONFIG_SMP */ |
2234 | |
2235 | /* |
2236 | * UP percpu area setup. |
2237 | * |
2238 | * UP always uses the km-based percpu allocator with identity mapping. |
2239 | * Static percpu variables are indistinguishable from the usual static |
2240 | * variables and don't require any special preparation. |
2241 | */ |
2242 | void __init setup_per_cpu_areas(void) |
2243 | { |
2244 | const size_t unit_size = |
2245 | roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, |
2246 | PERCPU_DYNAMIC_RESERVE)); |
2247 | struct pcpu_alloc_info *ai; |
2248 | void *fc; |
2249 | |
2250 | ai = pcpu_alloc_alloc_info(1, 1); |
2251 | fc = memblock_virt_alloc_from_nopanic(unit_size, |
2252 | PAGE_SIZE, |
2253 | __pa(MAX_DMA_ADDRESS)); |
2254 | if (!ai || !fc) |
2255 | panic("Failed to allocate memory for percpu areas."); |
2256 | /* kmemleak tracks the percpu allocations separately */ |
2257 | kmemleak_free(fc); |
2258 | |
2259 | ai->dyn_size = unit_size; |
2260 | ai->unit_size = unit_size; |
2261 | ai->atom_size = unit_size; |
2262 | ai->alloc_size = unit_size; |
2263 | ai->groups[0].nr_units = 1; |
2264 | ai->groups[0].cpu_map[0] = 0; |
2265 | |
2266 | if (pcpu_setup_first_chunk(ai, fc) < 0) |
2267 | panic("Failed to initialize percpu areas."); |
2268 | } |
2269 | |
2270 | #endif /* CONFIG_SMP */ |
2271 | |
2272 | /* |
2273 | * First and reserved chunks are initialized with temporary allocation |
2274 | * map in initdata so that they can be used before slab is online. |
2275 | * This function is called after slab is brought up and replaces those |
2276 | * with properly allocated maps. |
2277 | */ |
2278 | void __init percpu_init_late(void) |
2279 | { |
2280 | struct pcpu_chunk *target_chunks[] = |
2281 | { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; |
2282 | struct pcpu_chunk *chunk; |
2283 | unsigned long flags; |
2284 | int i; |
2285 | |
2286 | for (i = 0; (chunk = target_chunks[i]); i++) { |
2287 | int *map; |
2288 | const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); |
2289 | |
2290 | BUILD_BUG_ON(size > PAGE_SIZE); |
2291 | |
2292 | map = pcpu_mem_zalloc(size); |
2293 | BUG_ON(!map); |
2294 | |
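/*
 * Swap in the new map under pcpu_lock so that a concurrent allocation
 * never observes a half-copied map.
 */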
2295 | spin_lock_irqsave(&pcpu_lock, flags); |
2296 | memcpy(map, chunk->map, size); |
2297 | chunk->map = map; |
2298 | spin_unlock_irqrestore(&pcpu_lock, flags); |
2299 | } |
2300 | } |
2301 | |
2302 | /* |
2303 | * The percpu allocator is initialized early during boot, when neither slab nor |
2304 | * workqueue is available. Plug async management until everything is up |
2305 | * and running. |
2306 | */ |
2307 | static int __init percpu_enable_async(void) |
2308 | { |
2309 | pcpu_async_enabled = true; |
2310 | return 0; |
2311 | } |
2312 | subsys_initcall(percpu_enable_async); |
2313 |