#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that these
 * fields would no longer need to be duplicated in the kmem_cache
 * definitions of SLAB and SLUB.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	unsigned long flags;		/* Active flags on the slab */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};
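
/*
 * For illustration only: with C11 anonymous structs the duplication
 * could be avoided roughly like this (KMEM_CACHE_COMMON_FIELDS is a
 * hypothetical macro expanding to the fields above):
 *
 *	struct kmem_cache {
 *		struct { KMEM_CACHE_COMMON_FIELDS };
 *		... allocator-specific fields ...
 *	};
 */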

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
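
/*
 * Rough bootstrap progression (illustrative, not normative): e.g. SLUB
 * sets slab_state = PARTIAL in kmem_cache_init() once kmem_cache_node
 * can be allocated, create_kmalloc_caches() advances it to UP, and a
 * late initcall (sysfs/proc setup) finally sets it to FULL.
 */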

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
	unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
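
/*
 * Note: kmem_cache_create() masks caller-supplied flags with
 * CACHE_CREATE_MASK, so allocator-internal bits cannot be set from
 * outside (see mm/slab_common.c).
 */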

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case the listed objects are simply
 * allocated or freed one at a time using these fallbacks.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
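
/*
 * The free-side fallback in mm/slab_common.c is essentially:
 *
 *	void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++)
 *			kmem_cache_free(s, p[i]);
 *	}
 */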

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)
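
/*
 * Usage sketch (caller holds slab_mutex; names are illustrative):
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root_cache)
 *		update_stats(c);
 */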

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * Memcg caches get a suffix appended to their name, since no two caches
 * in the system may share a name. When printing them locally, though,
 * it is clearer to refer to them by the base (root cache) name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

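/*
 * Charge a newly allocated slab page to the memcg that this (non-root)
 * cache belongs to, and account it in the memcg's slab statistics;
 * root caches are not accounted here.
 */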
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both paths return the same
	 * cache, but we don't want to pay the lookup price in that case.
	 * If kmemcg is not compiled in, the compiler should be smart
	 * enough not to even do the assignment; slab_equal_or_root()
	 * is then a constant as well.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

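/*
 * Usable size of an object, as passed to the debugging hooks below;
 * for SLUB this is also what ksize() ultimately reports.
 */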
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there
	 * or track user information, then we can only use the
	 * space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc. for the allocation.
	 */
	return s->size;
#endif
}

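/*
 * Common pre-allocation work for all allocators: apply gfp_allowed_mask,
 * run the lockdep/might_sleep checks and fault injection, and, when
 * kmem accounting applies, pick the per-memcg cache to allocate from.
 * Returns NULL if the allocation should fail.
 */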
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

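/*
 * Common post-allocation work: notify the kmemcheck/kmemleak/kasan
 * debugging hooks about each new object, then drop the memcg cache
 * reference taken in slab_pre_alloc_hook().
 */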
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long num_slabs;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))
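
/*
 * Usage sketch (names illustrative; nr_partial exists under CONFIG_SLUB):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *	unsigned long partial = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */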

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

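/*
 * Freelist randomization: pre-compute a random sequence of object
 * indices that the allocator uses to randomize the order of objects
 * on freshly allocated slabs.
 */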
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */