1 | /* |
2 | * kernel/cpuset.c |
3 | * |
4 | * Processor and Memory placement constraints for sets of tasks. |
5 | * |
6 | * Copyright (C) 2003 BULL SA. |
7 | * Copyright (C) 2004-2007 Silicon Graphics, Inc. |
8 | * Copyright (C) 2006 Google, Inc |
9 | * |
10 | * Portions derived from Patrick Mochel's sysfs code. |
11 | * sysfs is Copyright (c) 2001-3 Patrick Mochel |
12 | * |
13 | * 2003-10-10 Written by Simon Derr. |
14 | * 2003-10-22 Updates by Stephen Hemminger. |
15 | * 2004 May-July Rework by Paul Jackson. |
16 | * 2006 Rework by Paul Menage to use generic cgroups |
17 | * 2008 Rework of the scheduler domains and CPU hotplug handling |
18 | * by Max Krasnyansky |
19 | * |
20 | * This file is subject to the terms and conditions of the GNU General Public |
21 | * License. See the file COPYING in the main directory of the Linux |
22 | * distribution for more details. |
23 | */ |
24 | |
25 | #include <linux/cpu.h> |
26 | #include <linux/cpumask.h> |
27 | #include <linux/cpuset.h> |
28 | #include <linux/err.h> |
29 | #include <linux/errno.h> |
30 | #include <linux/file.h> |
31 | #include <linux/fs.h> |
32 | #include <linux/init.h> |
33 | #include <linux/interrupt.h> |
34 | #include <linux/kernel.h> |
35 | #include <linux/kmod.h> |
36 | #include <linux/list.h> |
37 | #include <linux/mempolicy.h> |
38 | #include <linux/mm.h> |
39 | #include <linux/memory.h> |
40 | #include <linux/export.h> |
41 | #include <linux/mount.h> |
42 | #include <linux/namei.h> |
43 | #include <linux/pagemap.h> |
44 | #include <linux/proc_fs.h> |
45 | #include <linux/rcupdate.h> |
46 | #include <linux/sched.h> |
47 | #include <linux/seq_file.h> |
48 | #include <linux/security.h> |
49 | #include <linux/slab.h> |
50 | #include <linux/spinlock.h> |
51 | #include <linux/stat.h> |
52 | #include <linux/string.h> |
53 | #include <linux/time.h> |
54 | #include <linux/time64.h> |
55 | #include <linux/backing-dev.h> |
56 | #include <linux/sort.h> |
57 | |
58 | #include <asm/uaccess.h> |
59 | #include <linux/atomic.h> |
60 | #include <linux/mutex.h> |
61 | #include <linux/cgroup.h> |
62 | #include <linux/wait.h> |
63 | |
64 | DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); |
65 | DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); |
66 | |
67 | /* See "Frequency meter" comments, below. */ |
68 | |
69 | struct fmeter { |
70 | int cnt; /* unprocessed events count */ |
71 | int val; /* most recent output value */ |
72 | time64_t time; /* clock (secs) when val computed */ |
73 | spinlock_t lock; /* guards read or write of above */ |
74 | }; |
75 | |
76 | struct cpuset { |
77 | struct cgroup_subsys_state css; |
78 | |
79 | unsigned long flags; /* "unsigned long" so bitops work */ |
80 | |
81 | /* |
82 | * On default hierarchy: |
83 | * |
84 | * The user-configured masks can only be changed by writing to |
85 | * cpuset.cpus and cpuset.mems, and won't be limited by the |
86 | * parent masks. |
87 | * |
88 | * The effective masks are the real masks that apply to the tasks |
89 | * in the cpuset. They may be changed if the configured masks are |
90 | * changed or hotplug happens. |
91 | * |
92 | * effective_mask == configured_mask & parent's effective_mask, |
93 | * and if it ends up empty, it will inherit the parent's mask. |
94 | * |
95 | * |
96 | * On legacy hierarchy: |
97 | * |
98 | * The user-configured masks are always the same as the effective masks. |
99 | */ |
100 | |
101 | /* user-configured CPUs and Memory Nodes allowed to tasks */ |
102 | cpumask_var_t cpus_allowed; |
103 | cpumask_var_t cpus_requested; |
104 | nodemask_t mems_allowed; |
105 | |
106 | /* effective CPUs and Memory Nodes allowed to tasks */ |
107 | cpumask_var_t effective_cpus; |
108 | nodemask_t effective_mems; |
109 | |
110 | /* |
111 | * These are the old Memory Nodes that tasks in this cpuset took on. |
112 | * |
113 | * - top_cpuset.old_mems_allowed is initialized to mems_allowed. |
114 | * - A new cpuset's old_mems_allowed is initialized when some |
115 | * task is moved into it. |
116 | * - old_mems_allowed is used in cpuset_migrate_mm() when we change |
117 | * cpuset.mems_allowed and have tasks' nodemask updated, and |
118 | * then old_mems_allowed is updated to mems_allowed. |
119 | */ |
120 | nodemask_t old_mems_allowed; |
121 | |
122 | struct fmeter fmeter; /* memory_pressure filter */ |
123 | |
124 | /* |
125 | * Tasks are being attached to this cpuset. Used to prevent |
126 | * zeroing cpus/mems_allowed between ->can_attach() and ->attach(). |
127 | */ |
128 | int attach_in_progress; |
129 | |
130 | /* partition number for rebuild_sched_domains() */ |
131 | int pn; |
132 | |
133 | /* for custom sched domain */ |
134 | int relax_domain_level; |
135 | }; |
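
/*
 * A small worked example of the effective-mask rule above (CPU numbers
 * are illustrative only): on the default hierarchy, with a parent whose
 * effective_cpus is 0-3, a child configured with cpuset.cpus = 2-5 gets
 * effective_cpus = 2-3 (configured mask & parent's effective mask).
 * Had the child been configured with 6-7 instead, the intersection
 * would be empty and the child would simply inherit the parent's 0-3.
 */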
136 | |
137 | static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) |
138 | { |
139 | return css ? container_of(css, struct cpuset, css) : NULL; |
140 | } |
141 | |
142 | /* Retrieve the cpuset for a task */ |
143 | static inline struct cpuset *task_cs(struct task_struct *task) |
144 | { |
145 | return css_cs(task_css(task, cpuset_cgrp_id)); |
146 | } |
147 | |
148 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
149 | { |
150 | return css_cs(cs->css.parent); |
151 | } |
152 | |
153 | #ifdef CONFIG_NUMA |
154 | static inline bool task_has_mempolicy(struct task_struct *task) |
155 | { |
156 | return task->mempolicy; |
157 | } |
158 | #else |
159 | static inline bool task_has_mempolicy(struct task_struct *task) |
160 | { |
161 | return false; |
162 | } |
163 | #endif |
164 | |
165 | |
166 | /* bits in struct cpuset flags field */ |
167 | typedef enum { |
168 | CS_ONLINE, |
169 | CS_CPU_EXCLUSIVE, |
170 | CS_MEM_EXCLUSIVE, |
171 | CS_MEM_HARDWALL, |
172 | CS_MEMORY_MIGRATE, |
173 | CS_SCHED_LOAD_BALANCE, |
174 | CS_SPREAD_PAGE, |
175 | CS_SPREAD_SLAB, |
176 | } cpuset_flagbits_t; |
177 | |
178 | /* convenient tests for these bits */ |
179 | static inline bool is_cpuset_online(struct cpuset *cs) |
180 | { |
181 | return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); |
182 | } |
183 | |
184 | static inline int is_cpu_exclusive(const struct cpuset *cs) |
185 | { |
186 | return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); |
187 | } |
188 | |
189 | static inline int is_mem_exclusive(const struct cpuset *cs) |
190 | { |
191 | return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); |
192 | } |
193 | |
194 | static inline int is_mem_hardwall(const struct cpuset *cs) |
195 | { |
196 | return test_bit(CS_MEM_HARDWALL, &cs->flags); |
197 | } |
198 | |
199 | static inline int is_sched_load_balance(const struct cpuset *cs) |
200 | { |
201 | return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
202 | } |
203 | |
204 | static inline int is_memory_migrate(const struct cpuset *cs) |
205 | { |
206 | return test_bit(CS_MEMORY_MIGRATE, &cs->flags); |
207 | } |
208 | |
209 | static inline int is_spread_page(const struct cpuset *cs) |
210 | { |
211 | return test_bit(CS_SPREAD_PAGE, &cs->flags); |
212 | } |
213 | |
214 | static inline int is_spread_slab(const struct cpuset *cs) |
215 | { |
216 | return test_bit(CS_SPREAD_SLAB, &cs->flags); |
217 | } |
218 | |
219 | static struct cpuset top_cpuset = { |
220 | .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) | |
221 | (1 << CS_MEM_EXCLUSIVE)), |
222 | }; |
223 | |
224 | /** |
225 | * cpuset_for_each_child - traverse online children of a cpuset |
226 | * @child_cs: loop cursor pointing to the current child |
227 | * @pos_css: used for iteration |
228 | * @parent_cs: target cpuset to walk children of |
229 | * |
230 | * Walk @child_cs through the online children of @parent_cs. Must be used |
231 | * with RCU read locked. |
232 | */ |
233 | #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \ |
234 | css_for_each_child((pos_css), &(parent_cs)->css) \ |
235 | if (is_cpuset_online(((child_cs) = css_cs((pos_css))))) |
236 | |
237 | /** |
238 | * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants |
239 | * @des_cs: loop cursor pointing to the current descendant |
240 | * @pos_css: used for iteration |
241 | * @root_cs: target cpuset to walk descendants of |
242 | * |
243 | * Walk @des_cs through the online descendants of @root_cs. Must be used |
244 | * with RCU read locked. The caller may modify @pos_css by calling |
245 | * css_rightmost_descendant() to skip subtree. @root_cs is included in the |
246 | * iteration and the first node to be visited. |
247 | */ |
248 | #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \ |
249 | css_for_each_descendant_pre((pos_css), &(root_cs)->css) \ |
250 | if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) |
251 | |
252 | /* |
253 | * There are two global locks guarding cpuset structures - cpuset_mutex and |
254 | * callback_lock. We also require taking task_lock() when dereferencing a |
255 | * task's cpuset pointer. See "The task_lock() exception", at the end of this |
256 | * comment. |
257 | * |
258 | * A task must hold both locks to modify cpusets. If a task holds |
259 | * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it |
260 | * is the only task able to also acquire callback_lock and be able to |
261 | * modify cpusets. It can perform various checks on the cpuset structure |
262 | * first, knowing nothing will change. It can also allocate memory while |
263 | * just holding cpuset_mutex. While it is performing these checks, various |
264 | * callback routines can briefly acquire callback_lock to query cpusets. |
265 | * Once it is ready to make the changes, it takes callback_lock, blocking |
266 | * everyone else. |
267 | * |
268 | * Calls to the kernel memory allocator can not be made while holding |
269 | * callback_lock, as that would risk double tripping on callback_lock |
270 | * from one of the callbacks into the cpuset code from within |
271 | * __alloc_pages(). |
272 | * |
273 | * If a task is only holding callback_lock, then it has read-only |
274 | * access to cpusets. |
275 | * |
276 | * The task_struct fields mems_allowed and mempolicy may be changed by |
277 | * another task, so we use alloc_lock in the task_struct to protect |
278 | * them. |
279 | * |
280 | * The cpuset_common_file_read() handlers only hold callback_lock across |
281 | * small pieces of code, such as when reading out possibly multi-word |
282 | * cpumasks and nodemasks. |
283 | * |
284 | * Accessing a task's cpuset should be done in accordance with the |
285 | * guidelines for accessing subsystem state in kernel/cgroup.c |
286 | */ |
287 | |
288 | static DEFINE_MUTEX(cpuset_mutex); |
289 | static DEFINE_SPINLOCK(callback_lock); |
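
/*
 * A minimal sketch of the writer-side pattern described above, using a
 * hypothetical update_foo_masks() helper (not a function in this file);
 * real updaters such as update_cpumask() follow broadly the same shape,
 * with cpuset_mutex taken by their caller:
 *
 *	static void update_foo_masks(struct cpuset *cs)
 *	{
 *		mutex_lock(&cpuset_mutex);
 *		validate_and_allocate(cs);	  hypothetical: may sleep, masks stay stable
 *		spin_lock_irq(&callback_lock);
 *		publish_new_masks(cs);		  hypothetical: readers only take callback_lock
 *		spin_unlock_irq(&callback_lock);
 *		mutex_unlock(&cpuset_mutex);
 *	}
 */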
290 | |
291 | static struct workqueue_struct *cpuset_migrate_mm_wq; |
292 | |
293 | /* |
294 | * CPU / memory hotplug is handled asynchronously. |
295 | */ |
296 | static void cpuset_hotplug_workfn(struct work_struct *work); |
297 | static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); |
298 | |
299 | static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); |
300 | |
301 | /* |
302 | * This is ugly, but preserves the userspace API for existing cpuset |
303 | * users. If someone tries to mount the "cpuset" filesystem, we |
304 | * silently switch it to mount "cgroup" instead. |
305 | */ |
306 | static struct dentry *cpuset_mount(struct file_system_type *fs_type, |
307 | int flags, const char *unused_dev_name, void *data) |
308 | { |
309 | struct file_system_type *cgroup_fs = get_fs_type("cgroup"); |
310 | struct dentry *ret = ERR_PTR(-ENODEV); |
311 | if (cgroup_fs) { |
312 | char mountopts[] = |
313 | "cpuset,noprefix," |
314 | "release_agent=/sbin/cpuset_release_agent"; |
315 | ret = cgroup_fs->mount(cgroup_fs, flags, |
316 | unused_dev_name, mountopts); |
317 | put_filesystem(cgroup_fs); |
318 | } |
319 | return ret; |
320 | } |
321 | |
322 | static struct file_system_type cpuset_fs_type = { |
323 | .name = "cpuset", |
324 | .mount = cpuset_mount, |
325 | }; |
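
/*
 * In userspace terms (assuming the traditional /dev/cpuset mount point),
 *
 *	mount -t cpuset cpuset /dev/cpuset
 *
 * ends up roughly equivalent to
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent cgroup /dev/cpuset
 */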
326 | |
327 | /* |
328 | * Return in pmask the portion of a cpuset's effective_cpus that |
329 | * are online. If none are online, walk up the cpuset hierarchy |
330 | * until we find one that does have some online cpus. |
331 | * |
332 | * One way or another, we guarantee to return some non-empty subset |
333 | * of cpu_online_mask. |
334 | * |
335 | * Call with callback_lock or cpuset_mutex held. |
336 | */ |
337 | static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) |
338 | { |
339 | while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { |
340 | cs = parent_cs(cs); |
341 | if (unlikely(!cs)) { |
342 | /* |
343 | * The top cpuset doesn't have any online cpu as a |
344 | * consequence of a race between cpuset_hotplug_work |
345 | * and cpu hotplug notifier. But we know the top |
346 | * cpuset's effective_cpus is on its way to be |
347 | * identical to cpu_online_mask. |
348 | */ |
349 | cpumask_copy(pmask, cpu_online_mask); |
350 | return; |
351 | } |
352 | } |
353 | cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); |
354 | } |
355 | |
356 | /* |
357 | * Return in *pmask the portion of a cpuset's effective_mems that |
358 | * are online, with memory. If none are online with memory, walk |
359 | * up the cpuset hierarchy until we find one that does have some |
360 | * online mems. The top cpuset always has some mems online. |
361 | * |
362 | * One way or another, we guarantee to return some non-empty subset |
363 | * of node_states[N_MEMORY]. |
364 | * |
365 | * Call with callback_lock or cpuset_mutex held. |
366 | */ |
367 | static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) |
368 | { |
369 | while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) |
370 | cs = parent_cs(cs); |
371 | nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); |
372 | } |
373 | |
374 | /* |
375 | * update task's spread flag if cpuset's page/slab spread flag is set |
376 | * |
377 | * Call with callback_lock or cpuset_mutex held. |
378 | */ |
379 | static void cpuset_update_task_spread_flag(struct cpuset *cs, |
380 | struct task_struct *tsk) |
381 | { |
382 | if (is_spread_page(cs)) |
383 | task_set_spread_page(tsk); |
384 | else |
385 | task_clear_spread_page(tsk); |
386 | |
387 | if (is_spread_slab(cs)) |
388 | task_set_spread_slab(tsk); |
389 | else |
390 | task_clear_spread_slab(tsk); |
391 | } |
392 | |
393 | /* |
394 | * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? |
395 | * |
396 | * One cpuset is a subset of another if all its allowed CPUs and |
397 | * Memory Nodes are a subset of the other, and its exclusive flags |
398 | * are only set if the other's are set. Call holding cpuset_mutex. |
399 | */ |
400 | |
401 | static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) |
402 | { |
403 | return cpumask_subset(p->cpus_requested, q->cpus_requested) && |
404 | nodes_subset(p->mems_allowed, q->mems_allowed) && |
405 | is_cpu_exclusive(p) <= is_cpu_exclusive(q) && |
406 | is_mem_exclusive(p) <= is_mem_exclusive(q); |
407 | } |
408 | |
409 | /** |
410 | * alloc_trial_cpuset - allocate a trial cpuset |
411 | * @cs: the cpuset that the trial cpuset duplicates |
412 | */ |
413 | static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) |
414 | { |
415 | struct cpuset *trial; |
416 | |
417 | trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); |
418 | if (!trial) |
419 | return NULL; |
420 | |
421 | if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) |
422 | goto free_cs; |
423 | if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL)) |
424 | goto free_cpus; |
425 | |
426 | cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); |
427 | cpumask_copy(trial->effective_cpus, cs->effective_cpus); |
428 | return trial; |
429 | |
430 | free_cpus: |
431 | free_cpumask_var(trial->cpus_allowed); |
432 | free_cs: |
433 | kfree(trial); |
434 | return NULL; |
435 | } |
436 | |
437 | /** |
438 | * free_trial_cpuset - free the trial cpuset |
439 | * @trial: the trial cpuset to be freed |
440 | */ |
441 | static void free_trial_cpuset(struct cpuset *trial) |
442 | { |
443 | free_cpumask_var(trial->effective_cpus); |
444 | free_cpumask_var(trial->cpus_allowed); |
445 | kfree(trial); |
446 | } |
447 | |
448 | /* |
449 | * validate_change() - Used to validate that any proposed cpuset change |
450 | * follows the structural rules for cpusets. |
451 | * |
452 | * If we replaced the flag and mask values of the current cpuset |
453 | * (cur) with those values in the trial cpuset (trial), would |
454 | * our various subset and exclusive rules still be valid? Presumes |
455 | * cpuset_mutex held. |
456 | * |
457 | * 'cur' is the address of an actual, in-use cpuset. Operations |
458 | * such as list traversal that depend on the actual address of the |
459 | * cpuset in the list must use cur below, not trial. |
460 | * |
461 | * 'trial' is the address of bulk structure copy of cur, with |
462 | * perhaps one or more of the fields cpus_allowed, mems_allowed, |
463 | * or flags changed to new, trial values. |
464 | * |
465 | * Return 0 if valid, -errno if not. |
466 | */ |
467 | |
468 | static int validate_change(struct cpuset *cur, struct cpuset *trial) |
469 | { |
470 | struct cgroup_subsys_state *css; |
471 | struct cpuset *c, *par; |
472 | int ret; |
473 | |
474 | rcu_read_lock(); |
475 | |
476 | /* Each of our child cpusets must be a subset of us */ |
477 | ret = -EBUSY; |
478 | cpuset_for_each_child(c, css, cur) |
479 | if (!is_cpuset_subset(c, trial)) |
480 | goto out; |
481 | |
482 | /* Remaining checks don't apply to root cpuset */ |
483 | ret = 0; |
484 | if (cur == &top_cpuset) |
485 | goto out; |
486 | |
487 | par = parent_cs(cur); |
488 | |
489 | /* On legacy hierarchy, we must be a subset of our parent cpuset. */ |
490 | ret = -EACCES; |
491 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
492 | !is_cpuset_subset(trial, par)) |
493 | goto out; |
494 | |
495 | /* |
496 | * If either I or some sibling (!= me) is exclusive, we can't |
497 | * overlap |
498 | */ |
499 | ret = -EINVAL; |
500 | cpuset_for_each_child(c, css, par) { |
501 | if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && |
502 | c != cur && |
503 | cpumask_intersects(trial->cpus_requested, c->cpus_requested)) |
504 | goto out; |
505 | if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && |
506 | c != cur && |
507 | nodes_intersects(trial->mems_allowed, c->mems_allowed)) |
508 | goto out; |
509 | } |
510 | |
511 | /* |
512 | * Cpusets with tasks - existing or newly being attached - can't |
513 | * be changed to have empty cpus_allowed or mems_allowed. |
514 | */ |
515 | ret = -ENOSPC; |
516 | if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { |
517 | if (!cpumask_empty(cur->cpus_allowed) && |
518 | cpumask_empty(trial->cpus_allowed)) |
519 | goto out; |
520 | if (!nodes_empty(cur->mems_allowed) && |
521 | nodes_empty(trial->mems_allowed)) |
522 | goto out; |
523 | } |
524 | |
525 | /* |
526 | * We can't shrink if we won't have enough room for SCHED_DEADLINE |
527 | * tasks. |
528 | */ |
529 | ret = -EBUSY; |
530 | if (is_cpu_exclusive(cur) && |
531 | !cpuset_cpumask_can_shrink(cur->cpus_allowed, |
532 | trial->cpus_allowed)) |
533 | goto out; |
534 | |
535 | ret = 0; |
536 | out: |
537 | rcu_read_unlock(); |
538 | return ret; |
539 | } |
540 | |
541 | #ifdef CONFIG_SMP |
542 | /* |
543 | * Helper routine for generate_sched_domains(). |
544 | * Do cpusets a, b have overlapping effective cpus_allowed masks? |
545 | */ |
546 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) |
547 | { |
548 | return cpumask_intersects(a->effective_cpus, b->effective_cpus); |
549 | } |
550 | |
551 | static void |
552 | update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) |
553 | { |
554 | if (dattr->relax_domain_level < c->relax_domain_level) |
555 | dattr->relax_domain_level = c->relax_domain_level; |
556 | return; |
557 | } |
558 | |
559 | static void update_domain_attr_tree(struct sched_domain_attr *dattr, |
560 | struct cpuset *root_cs) |
561 | { |
562 | struct cpuset *cp; |
563 | struct cgroup_subsys_state *pos_css; |
564 | |
565 | rcu_read_lock(); |
566 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
567 | /* skip the whole subtree if @cp doesn't have any CPU */ |
568 | if (cpumask_empty(cp->cpus_allowed)) { |
569 | pos_css = css_rightmost_descendant(pos_css); |
570 | continue; |
571 | } |
572 | |
573 | if (is_sched_load_balance(cp)) |
574 | update_domain_attr(dattr, cp); |
575 | } |
576 | rcu_read_unlock(); |
577 | } |
578 | |
579 | /* |
580 | * generate_sched_domains() |
581 | * |
582 | * This function builds a partial partition of the system's CPUs. |
583 | * A 'partial partition' is a set of non-overlapping subsets whose |
584 | * union is a subset of that set. |
585 | * The output of this function needs to be passed to kernel/sched/core.c |
586 | * partition_sched_domains() routine, which will rebuild the scheduler's |
587 | * load balancing domains (sched domains) as specified by that partial |
588 | * partition. |
589 | * |
590 | * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt |
591 | * for a background explanation of this. |
592 | * |
593 | * Does not return errors, on the theory that the callers of this |
594 | * routine would rather not worry about failures to rebuild sched |
595 | * domains when operating in the severe memory shortage situations |
596 | * that could cause allocation failures below. |
597 | * |
598 | * Must be called with cpuset_mutex held. |
599 | * |
600 | * The three key local variables below are: |
601 | * cp - cursor for a top-down, pre-order scan of all cpusets. The |
602 | * scan loads a pointer to each cpuset marked |
603 | * is_sched_load_balance into the array 'csa'. For our |
604 | * purposes, rebuilding the scheduler's sched domains, we can |
605 | * ignore !is_sched_load_balance cpusets. |
606 | * csa - (for CpuSet Array) Array of pointers to all the cpusets |
607 | * that need to be load balanced, for convenient iterative |
608 | * access by the subsequent code that finds the best partition, |
609 | * i.e the set of domains (subsets) of CPUs such that the |
610 | * cpus_allowed of every cpuset marked is_sched_load_balance |
611 | * is a subset of one of these domains, while there are as |
612 | * many such domains as possible, each as small as possible. |
613 | * doms - Conversion of 'csa' to an array of cpumasks, for passing to |
614 | * the kernel/sched/core.c routine partition_sched_domains() in a |
615 | * convenient format, that can be easily compared to the prior |
616 | * value to determine what partition elements (sched domains) |
617 | * were changed (added or removed.) |
618 | * |
619 | * Finding the best partition (set of domains): |
620 | * The triple nested loops below over i, j, k scan over the |
621 | * load balanced cpusets (using the array of cpuset pointers in |
622 | * csa[]) looking for pairs of cpusets that have overlapping |
623 | * cpus_allowed, but which don't have the same 'pn' partition |
624 | * number, and merges them into the same partition. It keeps |
625 | * looping on the 'restart' label until it can no longer find |
626 | * any such pairs. |
627 | * |
628 | * The union of the cpus_allowed masks from the set of |
629 | * all cpusets having the same 'pn' value then form the one |
630 | * element of the partition (one sched domain) to be passed to |
631 | * partition_sched_domains(). |
632 | */ |
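/*
 * A small worked example of the merge step (cpuset names and CPU masks
 * are illustrative only): suppose csa[] holds three load-balanced
 * cpusets A, B and C with effective CPUs 0-1, 1-2 and 4-5.  A and B
 * overlap, so every cpuset carrying B's 'pn' is relabelled with A's and
 * ndoms drops from 3 to 2.  C overlaps neither, so it keeps its own
 * 'pn'.  The resulting doms[] then holds two sched domains, 0-2 and
 * 4-5.
 */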
633 | static int generate_sched_domains(cpumask_var_t **domains, |
634 | struct sched_domain_attr **attributes) |
635 | { |
636 | struct cpuset *cp; /* scans q */ |
637 | struct cpuset **csa; /* array of all cpuset ptrs */ |
638 | int csn; /* how many cpuset ptrs in csa so far */ |
639 | int i, j, k; /* indices for partition finding loops */ |
640 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
641 | cpumask_var_t non_isolated_cpus; /* load balanced CPUs */ |
642 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
643 | int ndoms = 0; /* number of sched domains in result */ |
644 | int nslot; /* next empty doms[] struct cpumask slot */ |
645 | struct cgroup_subsys_state *pos_css; |
646 | |
647 | doms = NULL; |
648 | dattr = NULL; |
649 | csa = NULL; |
650 | |
651 | if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL)) |
652 | goto done; |
653 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
654 | |
655 | /* Special case for the 99% of systems with one, full, sched domain */ |
656 | if (is_sched_load_balance(&top_cpuset)) { |
657 | ndoms = 1; |
658 | doms = alloc_sched_domains(ndoms); |
659 | if (!doms) |
660 | goto done; |
661 | |
662 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
663 | if (dattr) { |
664 | *dattr = SD_ATTR_INIT; |
665 | update_domain_attr_tree(dattr, &top_cpuset); |
666 | } |
667 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
668 | non_isolated_cpus); |
669 | |
670 | goto done; |
671 | } |
672 | |
673 | csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL); |
674 | if (!csa) |
675 | goto done; |
676 | csn = 0; |
677 | |
678 | rcu_read_lock(); |
679 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
680 | if (cp == &top_cpuset) |
681 | continue; |
682 | /* |
683 | * Continue traversing beyond @cp iff @cp has some CPUs and |
684 | * isn't load balancing. The former is obvious. The |
685 | * latter: All child cpusets contain a subset of the |
686 | * parent's cpus, so just skip them, and then we call |
687 | * update_domain_attr_tree() to calc relax_domain_level of |
688 | * the corresponding sched domain. |
689 | */ |
690 | if (!cpumask_empty(cp->cpus_allowed) && |
691 | !(is_sched_load_balance(cp) && |
692 | cpumask_intersects(cp->cpus_allowed, non_isolated_cpus))) |
693 | continue; |
694 | |
695 | if (is_sched_load_balance(cp)) |
696 | csa[csn++] = cp; |
697 | |
698 | /* skip @cp's subtree */ |
699 | pos_css = css_rightmost_descendant(pos_css); |
700 | } |
701 | rcu_read_unlock(); |
702 | |
703 | for (i = 0; i < csn; i++) |
704 | csa[i]->pn = i; |
705 | ndoms = csn; |
706 | |
707 | restart: |
708 | /* Find the best partition (set of sched domains) */ |
709 | for (i = 0; i < csn; i++) { |
710 | struct cpuset *a = csa[i]; |
711 | int apn = a->pn; |
712 | |
713 | for (j = 0; j < csn; j++) { |
714 | struct cpuset *b = csa[j]; |
715 | int bpn = b->pn; |
716 | |
717 | if (apn != bpn && cpusets_overlap(a, b)) { |
718 | for (k = 0; k < csn; k++) { |
719 | struct cpuset *c = csa[k]; |
720 | |
721 | if (c->pn == bpn) |
722 | c->pn = apn; |
723 | } |
724 | ndoms--; /* one less element */ |
725 | goto restart; |
726 | } |
727 | } |
728 | } |
729 | |
730 | /* |
731 | * Now we know how many domains to create. |
732 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. |
733 | */ |
734 | doms = alloc_sched_domains(ndoms); |
735 | if (!doms) |
736 | goto done; |
737 | |
738 | /* |
739 | * The rest of the code, including the scheduler, can deal with |
740 | * dattr==NULL case. No need to abort if alloc fails. |
741 | */ |
742 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); |
743 | |
744 | for (nslot = 0, i = 0; i < csn; i++) { |
745 | struct cpuset *a = csa[i]; |
746 | struct cpumask *dp; |
747 | int apn = a->pn; |
748 | |
749 | if (apn < 0) { |
750 | /* Skip completed partitions */ |
751 | continue; |
752 | } |
753 | |
754 | dp = doms[nslot]; |
755 | |
756 | if (nslot == ndoms) { |
757 | static int warnings = 10; |
758 | if (warnings) { |
759 | pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", |
760 | nslot, ndoms, csn, i, apn); |
761 | warnings--; |
762 | } |
763 | continue; |
764 | } |
765 | |
766 | cpumask_clear(dp); |
767 | if (dattr) |
768 | *(dattr + nslot) = SD_ATTR_INIT; |
769 | for (j = i; j < csn; j++) { |
770 | struct cpuset *b = csa[j]; |
771 | |
772 | if (apn == b->pn) { |
773 | cpumask_or(dp, dp, b->effective_cpus); |
774 | cpumask_and(dp, dp, non_isolated_cpus); |
775 | if (dattr) |
776 | update_domain_attr_tree(dattr + nslot, b); |
777 | |
778 | /* Done with this partition */ |
779 | b->pn = -1; |
780 | } |
781 | } |
782 | nslot++; |
783 | } |
784 | BUG_ON(nslot != ndoms); |
785 | |
786 | done: |
787 | free_cpumask_var(non_isolated_cpus); |
788 | kfree(csa); |
789 | |
790 | /* |
791 | * Fallback to the default domain if kmalloc() failed. |
792 | * See comments in partition_sched_domains(). |
793 | */ |
794 | if (doms == NULL) |
795 | ndoms = 1; |
796 | |
797 | *domains = doms; |
798 | *attributes = dattr; |
799 | return ndoms; |
800 | } |
801 | |
802 | /* |
803 | * Rebuild scheduler domains. |
804 | * |
805 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
806 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset |
807 | * which has that flag enabled, or if any cpuset with a non-empty |
808 | * 'cpus' is removed, then call this routine to rebuild the |
809 | * scheduler's dynamic sched domains. |
810 | * |
811 | * Call with cpuset_mutex held. Takes get_online_cpus(). |
812 | */ |
813 | static void rebuild_sched_domains_locked(void) |
814 | { |
815 | struct sched_domain_attr *attr; |
816 | cpumask_var_t *doms; |
817 | int ndoms; |
818 | |
819 | lockdep_assert_held(&cpuset_mutex); |
820 | get_online_cpus(); |
821 | |
822 | /* |
823 | * We have raced with CPU hotplug. Don't do anything to avoid |
824 | * passing doms with offlined cpu to partition_sched_domains(). |
825 | * Anyways, hotplug work item will rebuild sched domains. |
826 | */ |
827 | if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) |
828 | goto out; |
829 | |
830 | /* Generate domain masks and attrs */ |
831 | ndoms = generate_sched_domains(&doms, &attr); |
832 | |
833 | /* Have scheduler rebuild the domains */ |
834 | partition_sched_domains(ndoms, doms, attr); |
835 | out: |
836 | put_online_cpus(); |
837 | } |
838 | #else /* !CONFIG_SMP */ |
839 | static void rebuild_sched_domains_locked(void) |
840 | { |
841 | } |
842 | #endif /* CONFIG_SMP */ |
843 | |
844 | void rebuild_sched_domains(void) |
845 | { |
846 | mutex_lock(&cpuset_mutex); |
847 | rebuild_sched_domains_locked(); |
848 | mutex_unlock(&cpuset_mutex); |
849 | } |
850 | |
851 | /** |
852 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. |
853 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed |
854 | * |
855 | * Iterate through each task of @cs updating its cpus_allowed to the |
856 | * effective cpuset's. As this function is called with cpuset_mutex held, |
857 | * cpuset membership stays stable. |
858 | */ |
859 | static void update_tasks_cpumask(struct cpuset *cs) |
860 | { |
861 | struct css_task_iter it; |
862 | struct task_struct *task; |
863 | |
864 | css_task_iter_start(&cs->css, &it); |
865 | while ((task = css_task_iter_next(&it))) |
866 | set_cpus_allowed_ptr(task, cs->effective_cpus); |
867 | css_task_iter_end(&it); |
868 | } |
869 | |
870 | /* |
871 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
872 | * @cs: the cpuset to consider |
873 | * @new_cpus: temp variable for calculating new effective_cpus |
874 | * |
875 | * When the configured cpumask is changed, the effective cpumasks of this cpuset |
876 | * and all its descendants need to be updated. |
877 | * |
878 | * On legacy hierarchy, effective_cpus will be the same as cpus_allowed. |
879 | * |
880 | * Called with cpuset_mutex held |
881 | */ |
882 | static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) |
883 | { |
884 | struct cpuset *cp; |
885 | struct cgroup_subsys_state *pos_css; |
886 | bool need_rebuild_sched_domains = false; |
887 | |
888 | rcu_read_lock(); |
889 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
890 | struct cpuset *parent = parent_cs(cp); |
891 | |
892 | cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus); |
893 | |
894 | /* |
895 | * If it becomes empty, inherit the effective mask of the |
896 | * parent, which is guaranteed to have some CPUs. |
897 | */ |
898 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
899 | cpumask_empty(new_cpus)) |
900 | cpumask_copy(new_cpus, parent->effective_cpus); |
901 | |
902 | /* Skip the whole subtree if the cpumask remains the same. */ |
903 | if (cpumask_equal(new_cpus, cp->effective_cpus)) { |
904 | pos_css = css_rightmost_descendant(pos_css); |
905 | continue; |
906 | } |
907 | |
908 | if (!css_tryget_online(&cp->css)) |
909 | continue; |
910 | rcu_read_unlock(); |
911 | |
912 | spin_lock_irq(&callback_lock); |
913 | cpumask_copy(cp->effective_cpus, new_cpus); |
914 | spin_unlock_irq(&callback_lock); |
915 | |
916 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
917 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
918 | |
919 | update_tasks_cpumask(cp); |
920 | |
921 | /* |
922 | * If the effective cpumask of any non-empty cpuset is changed, |
923 | * we need to rebuild sched domains. |
924 | */ |
925 | if (!cpumask_empty(cp->cpus_allowed) && |
926 | is_sched_load_balance(cp)) |
927 | need_rebuild_sched_domains = true; |
928 | |
929 | rcu_read_lock(); |
930 | css_put(&cp->css); |
931 | } |
932 | rcu_read_unlock(); |
933 | |
934 | if (need_rebuild_sched_domains) |
935 | rebuild_sched_domains_locked(); |
936 | } |
937 | |
938 | /** |
939 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it |
940 | * @cs: the cpuset to consider |
941 | * @trialcs: trial cpuset |
942 | * @buf: buffer of cpu numbers written to this cpuset |
943 | */ |
944 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
945 | const char *buf) |
946 | { |
947 | int retval; |
948 | |
949 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
950 | if (cs == &top_cpuset) |
951 | return -EACCES; |
952 | |
953 | /* |
954 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
955 | * Since cpulist_parse() fails on an empty mask, we special case |
956 | * that parsing. The validate_change() call ensures that cpusets |
957 | * with tasks have cpus. |
958 | */ |
959 | if (!*buf) { |
960 | cpumask_clear(trialcs->cpus_allowed); |
961 | } else { |
962 | retval = cpulist_parse(buf, trialcs->cpus_requested); |
963 | if (retval < 0) |
964 | return retval; |
965 | |
966 | if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask)) |
967 | return -EINVAL; |
968 | |
969 | cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask); |
970 | } |
971 | |
972 | /* Nothing to do if the cpus didn't change */ |
973 | if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) |
974 | return 0; |
975 | |
976 | retval = validate_change(cs, trialcs); |
977 | if (retval < 0) |
978 | return retval; |
979 | |
980 | spin_lock_irq(&callback_lock); |
981 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
982 | cpumask_copy(cs->cpus_requested, trialcs->cpus_requested); |
983 | spin_unlock_irq(&callback_lock); |
984 | |
985 | /* use trialcs->cpus_allowed as a temp variable */ |
986 | update_cpumasks_hier(cs, trialcs->cpus_allowed); |
987 | return 0; |
988 | } |
989 | |
990 | /* |
991 | * Migrate memory region from one set of nodes to another. This is |
992 | * performed asynchronously as it can be called from process migration path |
993 | * holding locks involved in process management. All mm migrations are |
994 | * performed in the queued order and can be waited for by flushing |
995 | * cpuset_migrate_mm_wq. |
996 | */ |
997 | |
998 | struct cpuset_migrate_mm_work { |
999 | struct work_struct work; |
1000 | struct mm_struct *mm; |
1001 | nodemask_t from; |
1002 | nodemask_t to; |
1003 | }; |
1004 | |
1005 | static void cpuset_migrate_mm_workfn(struct work_struct *work) |
1006 | { |
1007 | struct cpuset_migrate_mm_work *mwork = |
1008 | container_of(work, struct cpuset_migrate_mm_work, work); |
1009 | |
1010 | /* on a wq worker, no need to worry about %current's mems_allowed */ |
1011 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); |
1012 | mmput(mwork->mm); |
1013 | kfree(mwork); |
1014 | } |
1015 | |
1016 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
1017 | const nodemask_t *to) |
1018 | { |
1019 | struct cpuset_migrate_mm_work *mwork; |
1020 | |
1021 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
1022 | if (mwork) { |
1023 | mwork->mm = mm; |
1024 | mwork->from = *from; |
1025 | mwork->to = *to; |
1026 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); |
1027 | queue_work(cpuset_migrate_mm_wq, &mwork->work); |
1028 | } else { |
1029 | mmput(mm); |
1030 | } |
1031 | } |
1032 | |
1033 | static void cpuset_post_attach(void) |
1034 | { |
1035 | flush_workqueue(cpuset_migrate_mm_wq); |
1036 | } |
1037 | |
1038 | /* |
1039 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
1040 | * @tsk: the task to change |
1041 | * @newmems: new nodes that the task will be allowed to use |
1042 | * |
1043 | * In order to avoid seeing no nodes if the old and new nodes are disjoint, |
1044 | * we structure updates as setting all new allowed nodes, then clearing newly |
1045 | * disallowed ones. |
1046 | */ |
1047 | static void cpuset_change_task_nodemask(struct task_struct *tsk, |
1048 | nodemask_t *newmems) |
1049 | { |
1050 | bool need_loop; |
1051 | |
1052 | task_lock(tsk); |
1053 | /* |
1054 | * Determine if a loop is necessary if another thread is doing |
1055 | * read_mems_allowed_begin(). If at least one node remains unchanged and |
1056 | * tsk does not have a mempolicy, then an empty nodemask will not be |
1057 | * possible when mems_allowed is larger than a word. |
1058 | */ |
1059 | need_loop = task_has_mempolicy(tsk) || |
1060 | !nodes_intersects(*newmems, tsk->mems_allowed); |
1061 | |
1062 | if (need_loop) { |
1063 | local_irq_disable(); |
1064 | write_seqcount_begin(&tsk->mems_allowed_seq); |
1065 | } |
1066 | |
1067 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
1068 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1); |
1069 | |
1070 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2); |
1071 | tsk->mems_allowed = *newmems; |
1072 | |
1073 | if (need_loop) { |
1074 | write_seqcount_end(&tsk->mems_allowed_seq); |
1075 | local_irq_enable(); |
1076 | } |
1077 | |
1078 | task_unlock(tsk); |
1079 | } |
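
/*
 * A small worked example of the ordering in cpuset_change_task_nodemask()
 * above (node numbers are illustrative only): moving a task from
 * mems_allowed = 0 to newmems = 1 first ORs the masks, giving 0-1, and
 * only then assigns 1.  A concurrent allocator therefore sees either the
 * old mask, the union, or the new mask - never an empty nodemask.
 */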
1080 | |
1081 | static void *cpuset_being_rebound; |
1082 | |
1083 | /** |
1084 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
1085 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
1086 | * |
1087 | * Iterate through each task of @cs updating its mems_allowed to the |
1088 | * effective cpuset's. As this function is called with cpuset_mutex held, |
1089 | * cpuset membership stays stable. |
1090 | */ |
1091 | static void update_tasks_nodemask(struct cpuset *cs) |
1092 | { |
1093 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
1094 | struct css_task_iter it; |
1095 | struct task_struct *task; |
1096 | |
1097 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
1098 | |
1099 | guarantee_online_mems(cs, &newmems); |
1100 | |
1101 | /* |
1102 | * The mpol_rebind_mm() call takes mmap_sem, which we couldn't |
1103 | * take while holding tasklist_lock. Forks can happen - the |
1104 | * mpol_dup() cpuset_being_rebound check will catch such forks, |
1105 | * and rebind their vma mempolicies too. Because we still hold |
1106 | * the global cpuset_mutex, we know that no other rebind effort |
1107 | * will be contending for the global variable cpuset_being_rebound. |
1108 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
1109 | * is idempotent. Also migrate pages in each mm to new nodes. |
1110 | */ |
1111 | css_task_iter_start(&cs->css, &it); |
1112 | while ((task = css_task_iter_next(&it))) { |
1113 | struct mm_struct *mm; |
1114 | bool migrate; |
1115 | |
1116 | cpuset_change_task_nodemask(task, &newmems); |
1117 | |
1118 | mm = get_task_mm(task); |
1119 | if (!mm) |
1120 | continue; |
1121 | |
1122 | migrate = is_memory_migrate(cs); |
1123 | |
1124 | mpol_rebind_mm(mm, &cs->mems_allowed); |
1125 | if (migrate) |
1126 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); |
1127 | else |
1128 | mmput(mm); |
1129 | } |
1130 | css_task_iter_end(&it); |
1131 | |
1132 | /* |
1133 | * All the tasks' nodemasks have been updated, update |
1134 | * cs->old_mems_allowed. |
1135 | */ |
1136 | cs->old_mems_allowed = newmems; |
1137 | |
1138 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
1139 | cpuset_being_rebound = NULL; |
1140 | } |
1141 | |
1142 | /* |
1143 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
1144 | * @cs: the cpuset to consider |
1145 | * @new_mems: a temp variable for calculating new effective_mems |
1146 | * |
1147 | * When configured nodemask is changed, the effective nodemasks of this cpuset |
1148 | * and all its descendants need to be updated. |
1149 | * |
1150 | * On legacy hierarchy, effective_mems will be the same as mems_allowed. |
1151 | * |
1152 | * Called with cpuset_mutex held |
1153 | */ |
1154 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
1155 | { |
1156 | struct cpuset *cp; |
1157 | struct cgroup_subsys_state *pos_css; |
1158 | |
1159 | rcu_read_lock(); |
1160 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
1161 | struct cpuset *parent = parent_cs(cp); |
1162 | |
1163 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); |
1164 | |
1165 | /* |
1166 | * If it becomes empty, inherit the effective mask of the |
1167 | * parent, which is guaranteed to have some MEMs. |
1168 | */ |
1169 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
1170 | nodes_empty(*new_mems)) |
1171 | *new_mems = parent->effective_mems; |
1172 | |
1173 | /* Skip the whole subtree if the nodemask remains the same. */ |
1174 | if (nodes_equal(*new_mems, cp->effective_mems)) { |
1175 | pos_css = css_rightmost_descendant(pos_css); |
1176 | continue; |
1177 | } |
1178 | |
1179 | if (!css_tryget_online(&cp->css)) |
1180 | continue; |
1181 | rcu_read_unlock(); |
1182 | |
1183 | spin_lock_irq(&callback_lock); |
1184 | cp->effective_mems = *new_mems; |
1185 | spin_unlock_irq(&callback_lock); |
1186 | |
1187 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
1188 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
1189 | |
1190 | update_tasks_nodemask(cp); |
1191 | |
1192 | rcu_read_lock(); |
1193 | css_put(&cp->css); |
1194 | } |
1195 | rcu_read_unlock(); |
1196 | } |
1197 | |
1198 | /* |
1199 | * Handle user request to change the 'mems' memory placement |
1200 | * of a cpuset. Needs to validate the request, update the |
1201 | * cpusets mems_allowed, and for each task in the cpuset, |
1202 | * update mems_allowed and rebind task's mempolicy and any vma |
1203 | * mempolicies and if the cpuset is marked 'memory_migrate', |
1204 | * migrate the task's pages to the new memory. |
1205 | * |
1206 | * Call with cpuset_mutex held. May take callback_lock during call. |
1207 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
1208 | * lock each such task's mm->mmap_sem, scan its VMAs and rebind |
1209 | * their mempolicies to the cpusets new mems_allowed. |
1210 | */ |
1211 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
1212 | const char *buf) |
1213 | { |
1214 | int retval; |
1215 | |
1216 | /* |
1217 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; |
1218 | * it's read-only |
1219 | */ |
1220 | if (cs == &top_cpuset) { |
1221 | retval = -EACCES; |
1222 | goto done; |
1223 | } |
1224 | |
1225 | /* |
1226 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. |
1227 | * Since nodelist_parse() fails on an empty mask, we special case |
1228 | * that parsing. The validate_change() call ensures that cpusets |
1229 | * with tasks have memory. |
1230 | */ |
1231 | if (!*buf) { |
1232 | nodes_clear(trialcs->mems_allowed); |
1233 | } else { |
1234 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
1235 | if (retval < 0) |
1236 | goto done; |
1237 | |
1238 | if (!nodes_subset(trialcs->mems_allowed, |
1239 | top_cpuset.mems_allowed)) { |
1240 | retval = -EINVAL; |
1241 | goto done; |
1242 | } |
1243 | } |
1244 | |
1245 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { |
1246 | retval = 0; /* Too easy - nothing to do */ |
1247 | goto done; |
1248 | } |
1249 | retval = validate_change(cs, trialcs); |
1250 | if (retval < 0) |
1251 | goto done; |
1252 | |
1253 | spin_lock_irq(&callback_lock); |
1254 | cs->mems_allowed = trialcs->mems_allowed; |
1255 | spin_unlock_irq(&callback_lock); |
1256 | |
1257 | /* use trialcs->mems_allowed as a temp variable */ |
1258 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
1259 | done: |
1260 | return retval; |
1261 | } |
1262 | |
1263 | int current_cpuset_is_being_rebound(void) |
1264 | { |
1265 | int ret; |
1266 | |
1267 | rcu_read_lock(); |
1268 | ret = task_cs(current) == cpuset_being_rebound; |
1269 | rcu_read_unlock(); |
1270 | |
1271 | return ret; |
1272 | } |
1273 | |
1274 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1275 | { |
1276 | #ifdef CONFIG_SMP |
1277 | if (val < -1 || val >= sched_domain_level_max) |
1278 | return -EINVAL; |
1279 | #endif |
1280 | |
1281 | if (val != cs->relax_domain_level) { |
1282 | cs->relax_domain_level = val; |
1283 | if (!cpumask_empty(cs->cpus_allowed) && |
1284 | is_sched_load_balance(cs)) |
1285 | rebuild_sched_domains_locked(); |
1286 | } |
1287 | |
1288 | return 0; |
1289 | } |
1290 | |
1291 | /** |
1292 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
1293 | * @cs: the cpuset in which each task's spread flags needs to be changed |
1294 | * |
1295 | * Iterate through each task of @cs updating its spread flags. As this |
1296 | * function is called with cpuset_mutex held, cpuset membership stays |
1297 | * stable. |
1298 | */ |
1299 | static void update_tasks_flags(struct cpuset *cs) |
1300 | { |
1301 | struct css_task_iter it; |
1302 | struct task_struct *task; |
1303 | |
1304 | css_task_iter_start(&cs->css, &it); |
1305 | while ((task = css_task_iter_next(&it))) |
1306 | cpuset_update_task_spread_flag(cs, task); |
1307 | css_task_iter_end(&it); |
1308 | } |
1309 | |
1310 | /* |
1311 | * update_flag - read a 0 or a 1 in a file and update associated flag |
1312 | * bit: the bit to update (see cpuset_flagbits_t) |
1313 | * cs: the cpuset to update |
1314 | * turning_on: whether the flag is being set or cleared |
1315 | * |
1316 | * Call with cpuset_mutex held. |
1317 | */ |
1318 | |
1319 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
1320 | int turning_on) |
1321 | { |
1322 | struct cpuset *trialcs; |
1323 | int balance_flag_changed; |
1324 | int spread_flag_changed; |
1325 | int err; |
1326 | |
1327 | trialcs = alloc_trial_cpuset(cs); |
1328 | if (!trialcs) |
1329 | return -ENOMEM; |
1330 | |
1331 | if (turning_on) |
1332 | set_bit(bit, &trialcs->flags); |
1333 | else |
1334 | clear_bit(bit, &trialcs->flags); |
1335 | |
1336 | err = validate_change(cs, trialcs); |
1337 | if (err < 0) |
1338 | goto out; |
1339 | |
1340 | balance_flag_changed = (is_sched_load_balance(cs) != |
1341 | is_sched_load_balance(trialcs)); |
1342 | |
1343 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
1344 | || (is_spread_page(cs) != is_spread_page(trialcs))); |
1345 | |
1346 | spin_lock_irq(&callback_lock); |
1347 | cs->flags = trialcs->flags; |
1348 | spin_unlock_irq(&callback_lock); |
1349 | |
1350 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
1351 | rebuild_sched_domains_locked(); |
1352 | |
1353 | if (spread_flag_changed) |
1354 | update_tasks_flags(cs); |
1355 | out: |
1356 | free_trial_cpuset(trialcs); |
1357 | return err; |
1358 | } |
1359 | |
1360 | /* |
1361 | * Frequency meter - How fast is some event occurring? |
1362 | * |
1363 | * These routines manage a digitally filtered, constant time based, |
1364 | * event frequency meter. There are four routines: |
1365 | * fmeter_init() - initialize a frequency meter. |
1366 | * fmeter_markevent() - called each time the event happens. |
1367 | * fmeter_getrate() - returns the recent rate of such events. |
1368 | * fmeter_update() - internal routine used to update fmeter. |
1369 | * |
1370 | * A common data structure is passed to each of these routines, |
1371 | * which is used to keep track of the state required to manage the |
1372 | * frequency meter and its digital filter. |
1373 | * |
1374 | * The filter works on the number of events marked per unit time. |
1375 | * The filter is single-pole low-pass recursive (IIR). The time unit |
1376 | * is 1 second. Arithmetic is done using 32-bit integers scaled to |
1377 | * simulate 3 decimal digits of precision (multiplied by 1000). |
1378 | * |
1379 | * With an FM_COEF of 933, and a time base of 1 second, the filter |
1380 | * has a half-life of 10 seconds, meaning that if the events quit |
1381 | * happening, then the rate returned from the fmeter_getrate() |
1382 | * will be cut in half each 10 seconds, until it converges to zero. |
1383 | * |
1384 | * It is not worth doing a real infinitely recursive filter. If more |
1385 | * than FM_MAXTICKS ticks have elapsed since the last filter event, |
1386 | * just compute FM_MAXTICKS ticks worth, by which point the level |
1387 | * will be stable. |
1388 | * |
1389 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid |
1390 | * arithmetic overflow in the fmeter_update() routine. |
1391 | * |
1392 | * Given the simple 32 bit integer arithmetic used, this meter works |
1393 | * best for reporting rates between one per millisecond (msec) and |
1394 | * one per 32 (approx) seconds. At constant rates faster than one |
1395 | * per msec it maxes out at values just under 1,000,000. At constant |
1396 | * rates between one per msec, and one per second it will stabilize |
1397 | * to a value N*1000, where N is the rate of events per second. |
1398 | * At constant rates between one per second and one per 32 seconds, |
1399 | * it will be choppy, moving up on the seconds that have an event, |
1400 | * and then decaying until the next event. At rates slower than |
1401 | * about one in 32 seconds, it decays all the way back to zero between |
1402 | * each event. |
1403 | */ |
1404 | |
1405 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ |
1406 | #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ |
1407 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ |
1408 | #define FM_SCALE 1000 /* faux fixed point scale */ |
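
/*
 * A quick sanity check of the constants above (figures rounded): each
 * idle one-second tick multiplies val by FM_COEF/FM_SCALE = 0.933, and
 * 0.933^10 is roughly 0.50, which is the 10 second half-life quoted
 * above.  At a steady rate of N events per second, cnt accumulates
 * N * FM_SCALE per tick, each tick adds ((FM_SCALE - FM_COEF) * cnt) /
 * FM_SCALE = 67 * N to val, and the recursion val = 0.933 * val + 67 * N
 * converges to val = 1000 * N - the "N*1000" plateau described above.
 */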
1409 | |
1410 | /* Initialize a frequency meter */ |
1411 | static void fmeter_init(struct fmeter *fmp) |
1412 | { |
1413 | fmp->cnt = 0; |
1414 | fmp->val = 0; |
1415 | fmp->time = 0; |
1416 | spin_lock_init(&fmp->lock); |
1417 | } |
1418 | |
1419 | /* Internal meter update - process cnt events and update value */ |
1420 | static void fmeter_update(struct fmeter *fmp) |
1421 | { |
1422 | time64_t now; |
1423 | u32 ticks; |
1424 | |
1425 | now = ktime_get_seconds(); |
1426 | ticks = now - fmp->time; |
1427 | |
1428 | if (ticks == 0) |
1429 | return; |
1430 | |
1431 | ticks = min(FM_MAXTICKS, ticks); |
1432 | while (ticks-- > 0) |
1433 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; |
1434 | fmp->time = now; |
1435 | |
1436 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; |
1437 | fmp->cnt = 0; |
1438 | } |
1439 | |
1440 | /* Process any previous ticks, then bump cnt by one (times scale). */ |
1441 | static void fmeter_markevent(struct fmeter *fmp) |
1442 | { |
1443 | spin_lock(&fmp->lock); |
1444 | fmeter_update(fmp); |
1445 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); |
1446 | spin_unlock(&fmp->lock); |
1447 | } |
1448 | |
1449 | /* Process any previous ticks, then return current value. */ |
1450 | static int fmeter_getrate(struct fmeter *fmp) |
1451 | { |
1452 | int val; |
1453 | |
1454 | spin_lock(&fmp->lock); |
1455 | fmeter_update(fmp); |
1456 | val = fmp->val; |
1457 | spin_unlock(&fmp->lock); |
1458 | return val; |
1459 | } |
1460 | |
1461 | static struct cpuset *cpuset_attach_old_cs; |
1462 | |
1463 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
1464 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
1465 | { |
1466 | struct cgroup_subsys_state *css; |
1467 | struct cpuset *cs; |
1468 | struct task_struct *task; |
1469 | int ret; |
1470 | |
1471 | /* used later by cpuset_attach() */ |
1472 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
1473 | cs = css_cs(css); |
1474 | |
1475 | mutex_lock(&cpuset_mutex); |
1476 | |
1477 | /* allow moving tasks into an empty cpuset if on default hierarchy */ |
1478 | ret = -ENOSPC; |
1479 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
1480 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
1481 | goto out_unlock; |
1482 | |
1483 | cgroup_taskset_for_each(task, css, tset) { |
1484 | ret = task_can_attach(task, cs->cpus_allowed); |
1485 | if (ret) |
1486 | goto out_unlock; |
1487 | ret = security_task_setscheduler(task); |
1488 | if (ret) |
1489 | goto out_unlock; |
1490 | } |
1491 | |
1492 | /* |
1493 | * Mark attach is in progress. This makes validate_change() fail |
1494 | * changes which zero cpus/mems_allowed. |
1495 | */ |
1496 | cs->attach_in_progress++; |
1497 | ret = 0; |
1498 | out_unlock: |
1499 | mutex_unlock(&cpuset_mutex); |
1500 | return ret; |
1501 | } |
1502 | |
1503 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
1504 | { |
1505 | struct cgroup_subsys_state *css; |
1506 | struct cpuset *cs; |
1507 | |
1508 | cgroup_taskset_first(tset, &css); |
1509 | cs = css_cs(css); |
1510 | |
1511 | mutex_lock(&cpuset_mutex); |
1512 | css_cs(css)->attach_in_progress--; |
1513 | mutex_unlock(&cpuset_mutex); |
1514 | } |
1515 | |
1516 | /* |
1517 | * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() |
1518 | * but we can't allocate it dynamically there. Define it global and |
1519 | * allocate from cpuset_init(). |
1520 | */ |
1521 | static cpumask_var_t cpus_attach; |
1522 | |
1523 | static void cpuset_attach(struct cgroup_taskset *tset) |
1524 | { |
1525 | /* static buf protected by cpuset_mutex */ |
1526 | static nodemask_t cpuset_attach_nodemask_to; |
1527 | struct task_struct *task; |
1528 | struct task_struct *leader; |
1529 | struct cgroup_subsys_state *css; |
1530 | struct cpuset *cs; |
1531 | struct cpuset *oldcs = cpuset_attach_old_cs; |
1532 | |
1533 | cgroup_taskset_first(tset, &css); |
1534 | cs = css_cs(css); |
1535 | |
1536 | mutex_lock(&cpuset_mutex); |
1537 | |
1538 | /* prepare for attach */ |
1539 | if (cs == &top_cpuset) |
1540 | cpumask_copy(cpus_attach, cpu_possible_mask); |
1541 | else |
1542 | guarantee_online_cpus(cs, cpus_attach); |
1543 | |
1544 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
1545 | |
1546 | cgroup_taskset_for_each(task, css, tset) { |
1547 | /* |
1548 | * can_attach beforehand should guarantee that this doesn't |
1549 | * fail. TODO: have a better way to handle failure here |
1550 | */ |
1551 | WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); |
1552 | |
1553 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); |
1554 | cpuset_update_task_spread_flag(cs, task); |
1555 | } |
1556 | |
1557 | /* |
1558 | * Change mm for all threadgroup leaders. This is expensive and may |
1559 | * sleep and should be moved outside migration path proper. |
1560 | */ |
1561 | cpuset_attach_nodemask_to = cs->effective_mems; |
1562 | cgroup_taskset_for_each_leader(leader, css, tset) { |
1563 | struct mm_struct *mm = get_task_mm(leader); |
1564 | |
1565 | if (mm) { |
1566 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); |
1567 | |
1568 | /* |
1569 | * old_mems_allowed is the same as mems_allowed |
1570 | * here, except if this task is being moved |
1571 | * automatically due to hotplug. In that case |
1572 | * @mems_allowed has been updated and is empty, so |
1573 | * @old_mems_allowed is the right nodemask that we |
1574 | * migrate the mm from. |
1575 | */ |
1576 | if (is_memory_migrate(cs)) |
1577 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
1578 | &cpuset_attach_nodemask_to); |
1579 | else |
1580 | mmput(mm); |
1581 | } |
1582 | } |
1583 | |
1584 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
1585 | |
1586 | cs->attach_in_progress--; |
1587 | if (!cs->attach_in_progress) |
1588 | wake_up(&cpuset_attach_wq); |
1589 | |
1590 | mutex_unlock(&cpuset_mutex); |
1591 | } |
1592 | |
1593 | /* The various types of files and directories in a cpuset file system */ |
1594 | |
1595 | typedef enum { |
1596 | FILE_MEMORY_MIGRATE, |
1597 | FILE_CPULIST, |
1598 | FILE_MEMLIST, |
1599 | FILE_EFFECTIVE_CPULIST, |
1600 | FILE_EFFECTIVE_MEMLIST, |
1601 | FILE_CPU_EXCLUSIVE, |
1602 | FILE_MEM_EXCLUSIVE, |
1603 | FILE_MEM_HARDWALL, |
1604 | FILE_SCHED_LOAD_BALANCE, |
1605 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
1606 | FILE_MEMORY_PRESSURE_ENABLED, |
1607 | FILE_MEMORY_PRESSURE, |
1608 | FILE_SPREAD_PAGE, |
1609 | FILE_SPREAD_SLAB, |
1610 | } cpuset_filetype_t; |
1611 | |
1612 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
1613 | u64 val) |
1614 | { |
1615 | struct cpuset *cs = css_cs(css); |
1616 | cpuset_filetype_t type = cft->private; |
1617 | int retval = 0; |
1618 | |
1619 | mutex_lock(&cpuset_mutex); |
1620 | if (!is_cpuset_online(cs)) { |
1621 | retval = -ENODEV; |
1622 | goto out_unlock; |
1623 | } |
1624 | |
1625 | switch (type) { |
1626 | case FILE_CPU_EXCLUSIVE: |
1627 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
1628 | break; |
1629 | case FILE_MEM_EXCLUSIVE: |
1630 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
1631 | break; |
1632 | case FILE_MEM_HARDWALL: |
1633 | retval = update_flag(CS_MEM_HARDWALL, cs, val); |
1634 | break; |
1635 | case FILE_SCHED_LOAD_BALANCE: |
1636 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
1637 | break; |
1638 | case FILE_MEMORY_MIGRATE: |
1639 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
1640 | break; |
1641 | case FILE_MEMORY_PRESSURE_ENABLED: |
1642 | cpuset_memory_pressure_enabled = !!val; |
1643 | break; |
1644 | case FILE_SPREAD_PAGE: |
1645 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
1646 | break; |
1647 | case FILE_SPREAD_SLAB: |
1648 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
1649 | break; |
1650 | default: |
1651 | retval = -EINVAL; |
1652 | break; |
1653 | } |
1654 | out_unlock: |
1655 | mutex_unlock(&cpuset_mutex); |
1656 | return retval; |
1657 | } |
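
/*
 * Example of how a flag write reaches the handler above (illustrative; the
 * mount point and the "cpuset." prefix depend on how the legacy hierarchy
 * was mounted, and "grp" is a hypothetical child cpuset):
 *
 *   # echo 1 > /sys/fs/cgroup/cpuset/grp/cpuset.memory_migrate
 *
 * arrives with cft->private == FILE_MEMORY_MIGRATE and val == 1, and is
 * dispatched to update_flag(CS_MEMORY_MIGRATE, cs, 1).
 */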
1658 | |
1659 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
1660 | s64 val) |
1661 | { |
1662 | struct cpuset *cs = css_cs(css); |
1663 | cpuset_filetype_t type = cft->private; |
1664 | int retval = -ENODEV; |
1665 | |
1666 | mutex_lock(&cpuset_mutex); |
1667 | if (!is_cpuset_online(cs)) |
1668 | goto out_unlock; |
1669 | |
1670 | switch (type) { |
1671 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
1672 | retval = update_relax_domain_level(cs, val); |
1673 | break; |
1674 | default: |
1675 | retval = -EINVAL; |
1676 | break; |
1677 | } |
1678 | out_unlock: |
1679 | mutex_unlock(&cpuset_mutex); |
1680 | return retval; |
1681 | } |
1682 | |
1683 | /* |
1684 | * Common handling for a write to a "cpus" or "mems" file. |
1685 | */ |
1686 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
1687 | char *buf, size_t nbytes, loff_t off) |
1688 | { |
1689 | struct cpuset *cs = css_cs(of_css(of)); |
1690 | struct cpuset *trialcs; |
1691 | int retval = -ENODEV; |
1692 | |
1693 | buf = strstrip(buf); |
1694 | |
1695 | /* |
1696 | * CPU or memory hotunplug may leave @cs w/o any execution |
1697 | * resources, in which case the hotplug code asynchronously updates |
1698 | * configuration and transfers all tasks to the nearest ancestor |
1699 | * which can execute. |
1700 | * |
1701 | * As writes to "cpus" or "mems" may restore @cs's execution |
1702 | * resources, wait for the previously scheduled operations before |
1703 | * proceeding, so that we don't keep removing tasks added |
1704 | * after execution capability is restored. |
1705 | * |
1706 | * cpuset_hotplug_work calls back into cgroup core via |
1707 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs |
1708 | * operation like this one can lead to a deadlock through kernfs |
1709 | * active_ref protection. Let's break the protection. Losing the |
1710 | * protection is okay as we check whether @cs is online after |
1711 | * grabbing cpuset_mutex anyway. This only happens on the legacy |
1712 | * hierarchies. |
1713 | */ |
1714 | css_get(&cs->css); |
1715 | kernfs_break_active_protection(of->kn); |
1716 | flush_work(&cpuset_hotplug_work); |
1717 | |
1718 | mutex_lock(&cpuset_mutex); |
1719 | if (!is_cpuset_online(cs)) |
1720 | goto out_unlock; |
1721 | |
1722 | trialcs = alloc_trial_cpuset(cs); |
1723 | if (!trialcs) { |
1724 | retval = -ENOMEM; |
1725 | goto out_unlock; |
1726 | } |
1727 | |
1728 | switch (of_cft(of)->private) { |
1729 | case FILE_CPULIST: |
1730 | retval = update_cpumask(cs, trialcs, buf); |
1731 | break; |
1732 | case FILE_MEMLIST: |
1733 | retval = update_nodemask(cs, trialcs, buf); |
1734 | break; |
1735 | default: |
1736 | retval = -EINVAL; |
1737 | break; |
1738 | } |
1739 | |
1740 | free_trial_cpuset(trialcs); |
1741 | out_unlock: |
1742 | mutex_unlock(&cpuset_mutex); |
1743 | kernfs_unbreak_active_protection(of->kn); |
1744 | css_put(&cs->css); |
1745 | flush_workqueue(cpuset_migrate_mm_wq); |
1746 | return retval ?: nbytes; |
1747 | } |
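
/*
 * Example writes handled above (illustrative; "grp" is a hypothetical
 * child cpuset on a legacy-hierarchy mount):
 *
 *   # echo 0-3,8 > /sys/fs/cgroup/cpuset/grp/cpuset.cpus
 *   # echo 0-1   > /sys/fs/cgroup/cpuset/grp/cpuset.mems
 *
 * The first write is dispatched to update_cpumask(), the second to
 * update_nodemask(), each operating on a trial cpuset so that a rejected
 * value leaves the existing configuration untouched.
 */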
1748 | |
1749 | /* |
1750 | * These ascii lists should be read in a single call, by using a user |
1751 | * buffer large enough to hold the entire map. If read in smaller |
1752 | * chunks, there is no guarantee of atomicity. Since the display format |
1753 | * used, a list of ranges of sequential numbers, is variable length, |
1754 | * and since these maps can change value dynamically, one could read |
1755 | * gibberish by doing partial reads while a list was changing. |
1756 | */ |
1757 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
1758 | { |
1759 | struct cpuset *cs = css_cs(seq_css(sf)); |
1760 | cpuset_filetype_t type = seq_cft(sf)->private; |
1761 | int ret = 0; |
1762 | |
1763 | spin_lock_irq(&callback_lock); |
1764 | |
1765 | switch (type) { |
1766 | case FILE_CPULIST: |
1767 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested)); |
1768 | break; |
1769 | case FILE_MEMLIST: |
1770 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
1771 | break; |
1772 | case FILE_EFFECTIVE_CPULIST: |
1773 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
1774 | break; |
1775 | case FILE_EFFECTIVE_MEMLIST: |
1776 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
1777 | break; |
1778 | default: |
1779 | ret = -EINVAL; |
1780 | } |
1781 | |
1782 | spin_unlock_irq(&callback_lock); |
1783 | return ret; |
1784 | } |
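
/*
 * Example output of the seq_show handler above (format only; actual
 * values depend on configuration):
 *
 *   # cat cpuset.cpus
 *   0-3,8-11
 *   # cat cpuset.mems
 *   0-1
 *
 * "%*pbl" prints the mask as a list of ranges, which is why partial reads
 * of a changing mask can return gibberish, as noted above.
 */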
1785 | |
1786 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
1787 | { |
1788 | struct cpuset *cs = css_cs(css); |
1789 | cpuset_filetype_t type = cft->private; |
1790 | switch (type) { |
1791 | case FILE_CPU_EXCLUSIVE: |
1792 | return is_cpu_exclusive(cs); |
1793 | case FILE_MEM_EXCLUSIVE: |
1794 | return is_mem_exclusive(cs); |
1795 | case FILE_MEM_HARDWALL: |
1796 | return is_mem_hardwall(cs); |
1797 | case FILE_SCHED_LOAD_BALANCE: |
1798 | return is_sched_load_balance(cs); |
1799 | case FILE_MEMORY_MIGRATE: |
1800 | return is_memory_migrate(cs); |
1801 | case FILE_MEMORY_PRESSURE_ENABLED: |
1802 | return cpuset_memory_pressure_enabled; |
1803 | case FILE_MEMORY_PRESSURE: |
1804 | return fmeter_getrate(&cs->fmeter); |
1805 | case FILE_SPREAD_PAGE: |
1806 | return is_spread_page(cs); |
1807 | case FILE_SPREAD_SLAB: |
1808 | return is_spread_slab(cs); |
1809 | default: |
1810 | BUG(); |
1811 | } |
1812 | |
1813 | /* Unreachable but makes gcc happy */ |
1814 | return 0; |
1815 | } |
1816 | |
1817 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
1818 | { |
1819 | struct cpuset *cs = css_cs(css); |
1820 | cpuset_filetype_t type = cft->private; |
1821 | switch (type) { |
1822 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
1823 | return cs->relax_domain_level; |
1824 | default: |
1825 | BUG(); |
1826 | } |
1827 | |
1828 | /* Unreachable but makes gcc happy */ |
1829 | return 0; |
1830 | } |
1831 | |
1832 | |
1833 | /* |
1834 | * for the common functions, 'private' gives the type of file |
1835 | */ |
1836 | |
1837 | static struct cftype files[] = { |
1838 | { |
1839 | .name = "cpus", |
1840 | .seq_show = cpuset_common_seq_show, |
1841 | .write = cpuset_write_resmask, |
1842 | .max_write_len = (100U + 6 * NR_CPUS), |
1843 | .private = FILE_CPULIST, |
1844 | }, |
1845 | |
1846 | { |
1847 | .name = "mems", |
1848 | .seq_show = cpuset_common_seq_show, |
1849 | .write = cpuset_write_resmask, |
1850 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
1851 | .private = FILE_MEMLIST, |
1852 | }, |
1853 | |
1854 | { |
1855 | .name = "effective_cpus", |
1856 | .seq_show = cpuset_common_seq_show, |
1857 | .private = FILE_EFFECTIVE_CPULIST, |
1858 | }, |
1859 | |
1860 | { |
1861 | .name = "effective_mems", |
1862 | .seq_show = cpuset_common_seq_show, |
1863 | .private = FILE_EFFECTIVE_MEMLIST, |
1864 | }, |
1865 | |
1866 | { |
1867 | .name = "cpu_exclusive", |
1868 | .read_u64 = cpuset_read_u64, |
1869 | .write_u64 = cpuset_write_u64, |
1870 | .private = FILE_CPU_EXCLUSIVE, |
1871 | }, |
1872 | |
1873 | { |
1874 | .name = "mem_exclusive", |
1875 | .read_u64 = cpuset_read_u64, |
1876 | .write_u64 = cpuset_write_u64, |
1877 | .private = FILE_MEM_EXCLUSIVE, |
1878 | }, |
1879 | |
1880 | { |
1881 | .name = "mem_hardwall", |
1882 | .read_u64 = cpuset_read_u64, |
1883 | .write_u64 = cpuset_write_u64, |
1884 | .private = FILE_MEM_HARDWALL, |
1885 | }, |
1886 | |
1887 | { |
1888 | .name = "sched_load_balance", |
1889 | .read_u64 = cpuset_read_u64, |
1890 | .write_u64 = cpuset_write_u64, |
1891 | .private = FILE_SCHED_LOAD_BALANCE, |
1892 | }, |
1893 | |
1894 | { |
1895 | .name = "sched_relax_domain_level", |
1896 | .read_s64 = cpuset_read_s64, |
1897 | .write_s64 = cpuset_write_s64, |
1898 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
1899 | }, |
1900 | |
1901 | { |
1902 | .name = "memory_migrate", |
1903 | .read_u64 = cpuset_read_u64, |
1904 | .write_u64 = cpuset_write_u64, |
1905 | .private = FILE_MEMORY_MIGRATE, |
1906 | }, |
1907 | |
1908 | { |
1909 | .name = "memory_pressure", |
1910 | .read_u64 = cpuset_read_u64, |
1911 | .private = FILE_MEMORY_PRESSURE, |
1912 | }, |
1913 | |
1914 | { |
1915 | .name = "memory_spread_page", |
1916 | .read_u64 = cpuset_read_u64, |
1917 | .write_u64 = cpuset_write_u64, |
1918 | .private = FILE_SPREAD_PAGE, |
1919 | }, |
1920 | |
1921 | { |
1922 | .name = "memory_spread_slab", |
1923 | .read_u64 = cpuset_read_u64, |
1924 | .write_u64 = cpuset_write_u64, |
1925 | .private = FILE_SPREAD_SLAB, |
1926 | }, |
1927 | |
1928 | { |
1929 | .name = "memory_pressure_enabled", |
1930 | .flags = CFTYPE_ONLY_ON_ROOT, |
1931 | .read_u64 = cpuset_read_u64, |
1932 | .write_u64 = cpuset_write_u64, |
1933 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
1934 | }, |
1935 | |
1936 | { } /* terminate */ |
1937 | }; |
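
/*
 * On the legacy hierarchy these cftypes surface as "cpuset."-prefixed
 * control files in each cpuset directory, e.g. (illustrative, not
 * exhaustive):
 *
 *   cpuset.cpus                cpuset.mems
 *   cpuset.effective_cpus      cpuset.effective_mems
 *   cpuset.cpu_exclusive       cpuset.mem_exclusive
 *   cpuset.sched_load_balance  cpuset.memory_pressure
 *
 * "memory_pressure_enabled" carries CFTYPE_ONLY_ON_ROOT and therefore
 * only appears in the root cpuset's directory.
 */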
1938 | |
1939 | /* |
1940 | * cpuset_css_alloc - allocate a cpuset css |
1941 | * @parent_css: css of the parent cpuset (NULL when allocating the top cpuset) |
1942 | */ |
1943 | |
1944 | static struct cgroup_subsys_state * |
1945 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) |
1946 | { |
1947 | struct cpuset *cs; |
1948 | |
1949 | if (!parent_css) |
1950 | return &top_cpuset.css; |
1951 | |
1952 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
1953 | if (!cs) |
1954 | return ERR_PTR(-ENOMEM); |
1955 | if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) |
1956 | goto free_cs; |
1957 | if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL)) |
1958 | goto free_allowed; |
1959 | if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL)) |
1960 | goto free_requested; |
1961 | |
1962 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
1963 | cpumask_clear(cs->cpus_allowed); |
1964 | cpumask_clear(cs->cpus_requested); |
1965 | nodes_clear(cs->mems_allowed); |
1966 | cpumask_clear(cs->effective_cpus); |
1967 | nodes_clear(cs->effective_mems); |
1968 | fmeter_init(&cs->fmeter); |
1969 | cs->relax_domain_level = -1; |
1970 | |
1971 | return &cs->css; |
1972 | |
1973 | free_requested: |
1974 | free_cpumask_var(cs->cpus_requested); |
1975 | free_allowed: |
1976 | free_cpumask_var(cs->cpus_allowed); |
1977 | free_cs: |
1978 | kfree(cs); |
1979 | return ERR_PTR(-ENOMEM); |
1980 | } |
1981 | |
1982 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
1983 | { |
1984 | struct cpuset *cs = css_cs(css); |
1985 | struct cpuset *parent = parent_cs(cs); |
1986 | struct cpuset *tmp_cs; |
1987 | struct cgroup_subsys_state *pos_css; |
1988 | |
1989 | if (!parent) |
1990 | return 0; |
1991 | |
1992 | mutex_lock(&cpuset_mutex); |
1993 | |
1994 | set_bit(CS_ONLINE, &cs->flags); |
1995 | if (is_spread_page(parent)) |
1996 | set_bit(CS_SPREAD_PAGE, &cs->flags); |
1997 | if (is_spread_slab(parent)) |
1998 | set_bit(CS_SPREAD_SLAB, &cs->flags); |
1999 | |
2000 | cpuset_inc(); |
2001 | |
2002 | spin_lock_irq(&callback_lock); |
2003 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
2004 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
2005 | cs->effective_mems = parent->effective_mems; |
2006 | } |
2007 | spin_unlock_irq(&callback_lock); |
2008 | |
2009 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
2010 | goto out_unlock; |
2011 | |
2012 | /* |
2013 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is |
2014 | * set. This flag handling is implemented in cgroup core for |
2015 | * historical reasons - the flag may be specified during mount. |
2016 | * |
2017 | * Currently, if any sibling cpusets have exclusive cpus or mem, we |
2018 | * refuse to clone the configuration - thereby refusing the task to |
2019 | * be entered, and as a result refusing the sys_unshare() or |
2020 | * clone() which initiated it. If this becomes a problem for some |
2021 | * users who wish to allow that scenario, then this could be |
2022 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive |
2023 | * (and likewise for mems) to the new cgroup. |
2024 | */ |
2025 | rcu_read_lock(); |
2026 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
2027 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
2028 | rcu_read_unlock(); |
2029 | goto out_unlock; |
2030 | } |
2031 | } |
2032 | rcu_read_unlock(); |
2033 | |
2034 | spin_lock_irq(&callback_lock); |
2035 | cs->mems_allowed = parent->mems_allowed; |
2036 | cs->effective_mems = parent->mems_allowed; |
2037 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
2038 | cpumask_copy(cs->cpus_requested, parent->cpus_requested); |
2039 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
2040 | spin_unlock_irq(&callback_lock); |
2041 | out_unlock: |
2042 | mutex_unlock(&cpuset_mutex); |
2043 | return 0; |
2044 | } |
2045 | |
2046 | /* |
2047 | * If the cpuset being removed has its flag 'sched_load_balance' |
2048 | * enabled, then simulate turning sched_load_balance off, which |
2049 | * will call rebuild_sched_domains_locked(). |
2050 | */ |
2051 | |
2052 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
2053 | { |
2054 | struct cpuset *cs = css_cs(css); |
2055 | |
2056 | mutex_lock(&cpuset_mutex); |
2057 | |
2058 | if (is_sched_load_balance(cs)) |
2059 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
2060 | |
2061 | cpuset_dec(); |
2062 | clear_bit(CS_ONLINE, &cs->flags); |
2063 | |
2064 | mutex_unlock(&cpuset_mutex); |
2065 | } |
2066 | |
2067 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
2068 | { |
2069 | struct cpuset *cs = css_cs(css); |
2070 | |
2071 | free_cpumask_var(cs->effective_cpus); |
2072 | free_cpumask_var(cs->cpus_allowed); |
2073 | free_cpumask_var(cs->cpus_requested); |
2074 | kfree(cs); |
2075 | } |
2076 | |
2077 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
2078 | { |
2079 | mutex_lock(&cpuset_mutex); |
2080 | spin_lock_irq(&callback_lock); |
2081 | |
2082 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
2083 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
2084 | top_cpuset.mems_allowed = node_possible_map; |
2085 | } else { |
2086 | cpumask_copy(top_cpuset.cpus_allowed, |
2087 | top_cpuset.effective_cpus); |
2088 | top_cpuset.mems_allowed = top_cpuset.effective_mems; |
2089 | } |
2090 | |
2091 | spin_unlock_irq(&callback_lock); |
2092 | mutex_unlock(&cpuset_mutex); |
2093 | } |
2094 | |
2095 | /* |
2096 | * Make sure the new task conforms to the current state of its parent, |
2097 | * which could have been changed by cpuset just after it inherits the |
2098 | * state from the parent and before it sits on the cgroup's task list. |
2099 | */ |
2100 | static void cpuset_fork(struct task_struct *task) |
2101 | { |
2102 | if (task_css_is_root(task, cpuset_cgrp_id)) |
2103 | return; |
2104 | |
2105 | set_cpus_allowed_ptr(task, &current->cpus_allowed); |
2106 | task->mems_allowed = current->mems_allowed; |
2107 | } |
2108 | |
2109 | struct cgroup_subsys cpuset_cgrp_subsys = { |
2110 | .css_alloc = cpuset_css_alloc, |
2111 | .css_online = cpuset_css_online, |
2112 | .css_offline = cpuset_css_offline, |
2113 | .css_free = cpuset_css_free, |
2114 | .can_attach = cpuset_can_attach, |
2115 | .cancel_attach = cpuset_cancel_attach, |
2116 | .attach = cpuset_attach, |
2117 | .post_attach = cpuset_post_attach, |
2118 | .bind = cpuset_bind, |
2119 | .fork = cpuset_fork, |
2120 | .legacy_cftypes = files, |
2121 | .early_init = true, |
2122 | }; |
2123 | |
2124 | /** |
2125 | * cpuset_init - initialize cpusets at system boot |
2126 | * |
2127 | * Description: Initialize top_cpuset and the cpuset internal file system. |
2128 | **/ |
2129 | |
2130 | int __init cpuset_init(void) |
2131 | { |
2132 | int err = 0; |
2133 | |
2134 | if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)) |
2135 | BUG(); |
2136 | if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)) |
2137 | BUG(); |
2138 | if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL)) |
2139 | BUG(); |
2140 | |
2141 | cpumask_setall(top_cpuset.cpus_allowed); |
2142 | cpumask_setall(top_cpuset.cpus_requested); |
2143 | nodes_setall(top_cpuset.mems_allowed); |
2144 | cpumask_setall(top_cpuset.effective_cpus); |
2145 | nodes_setall(top_cpuset.effective_mems); |
2146 | |
2147 | fmeter_init(&top_cpuset.fmeter); |
2148 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
2149 | top_cpuset.relax_domain_level = -1; |
2150 | |
2151 | err = register_filesystem(&cpuset_fs_type); |
2152 | if (err < 0) |
2153 | return err; |
2154 | |
2155 | if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)) |
2156 | BUG(); |
2157 | |
2158 | return 0; |
2159 | } |
2160 | |
2161 | /* |
2162 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
2163 | * or memory nodes, we need to walk over the cpuset hierarchy, |
2164 | * removing that CPU or node from all cpusets. If this removes the |
2165 | * last CPU or node from a cpuset, then move the tasks in the empty |
2166 | * cpuset to its next-highest non-empty parent. |
2167 | */ |
2168 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
2169 | { |
2170 | struct cpuset *parent; |
2171 | |
2172 | /* |
2173 | * Find its next-highest non-empty parent (the top cpuset |
2174 | * has online cpus, so it can't be empty). |
2175 | */ |
2176 | parent = parent_cs(cs); |
2177 | while (cpumask_empty(parent->cpus_allowed) || |
2178 | nodes_empty(parent->mems_allowed)) |
2179 | parent = parent_cs(parent); |
2180 | |
2181 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
2182 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
2183 | pr_cont_cgroup_name(cs->css.cgroup); |
2184 | pr_cont("\n"); |
2185 | } |
2186 | } |
2187 | |
2188 | static void |
2189 | hotplug_update_tasks_legacy(struct cpuset *cs, |
2190 | struct cpumask *new_cpus, nodemask_t *new_mems, |
2191 | bool cpus_updated, bool mems_updated) |
2192 | { |
2193 | bool is_empty; |
2194 | |
2195 | spin_lock_irq(&callback_lock); |
2196 | cpumask_copy(cs->cpus_allowed, new_cpus); |
2197 | cpumask_copy(cs->effective_cpus, new_cpus); |
2198 | cs->mems_allowed = *new_mems; |
2199 | cs->effective_mems = *new_mems; |
2200 | spin_unlock_irq(&callback_lock); |
2201 | |
2202 | /* |
2203 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, |
2204 | * as the tasks will be migrated to an ancestor. |
2205 | */ |
2206 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
2207 | update_tasks_cpumask(cs); |
2208 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
2209 | update_tasks_nodemask(cs); |
2210 | |
2211 | is_empty = cpumask_empty(cs->cpus_allowed) || |
2212 | nodes_empty(cs->mems_allowed); |
2213 | |
2214 | mutex_unlock(&cpuset_mutex); |
2215 | |
2216 | /* |
2217 | * Move tasks to the nearest ancestor with execution resources. |
2218 | * This is a full cgroup operation which will also call back into |
2219 | * cpuset, so it should be done outside any lock. |
2220 | */ |
2221 | if (is_empty) |
2222 | remove_tasks_in_empty_cpuset(cs); |
2223 | |
2224 | mutex_lock(&cpuset_mutex); |
2225 | } |
2226 | |
2227 | static void |
2228 | hotplug_update_tasks(struct cpuset *cs, |
2229 | struct cpumask *new_cpus, nodemask_t *new_mems, |
2230 | bool cpus_updated, bool mems_updated) |
2231 | { |
2232 | if (cpumask_empty(new_cpus)) |
2233 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); |
2234 | if (nodes_empty(*new_mems)) |
2235 | *new_mems = parent_cs(cs)->effective_mems; |
2236 | |
2237 | spin_lock_irq(&callback_lock); |
2238 | cpumask_copy(cs->effective_cpus, new_cpus); |
2239 | cs->effective_mems = *new_mems; |
2240 | spin_unlock_irq(&callback_lock); |
2241 | |
2242 | if (cpus_updated) |
2243 | update_tasks_cpumask(cs); |
2244 | if (mems_updated) |
2245 | update_tasks_nodemask(cs); |
2246 | } |
2247 | |
2248 | /** |
2249 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
2250 | * @cs: cpuset in interest |
2251 | * |
2252 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
2253 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, |
2254 | * all its tasks are moved to the nearest ancestor with both resources. |
2255 | */ |
2256 | static void cpuset_hotplug_update_tasks(struct cpuset *cs) |
2257 | { |
2258 | static cpumask_t new_cpus; |
2259 | static nodemask_t new_mems; |
2260 | bool cpus_updated; |
2261 | bool mems_updated; |
2262 | retry: |
2263 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); |
2264 | |
2265 | mutex_lock(&cpuset_mutex); |
2266 | |
2267 | /* |
2268 | * We have raced with task attaching. We wait until attaching |
2269 | * is finished, so we won't attach a task to an empty cpuset. |
2270 | */ |
2271 | if (cs->attach_in_progress) { |
2272 | mutex_unlock(&cpuset_mutex); |
2273 | goto retry; |
2274 | } |
2275 | |
2276 | cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus); |
2277 | nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems); |
2278 | |
2279 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
2280 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); |
2281 | |
2282 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) |
2283 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
2284 | cpus_updated, mems_updated); |
2285 | else |
2286 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
2287 | cpus_updated, mems_updated); |
2288 | |
2289 | mutex_unlock(&cpuset_mutex); |
2290 | } |
2291 | |
2292 | static bool force_rebuild; |
2293 | |
2294 | void cpuset_force_rebuild(void) |
2295 | { |
2296 | force_rebuild = true; |
2297 | } |
2298 | |
2299 | /** |
2300 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
2301 | * |
2302 | * This function is called after either CPU or memory configuration has |
2303 | * changed and updates cpuset accordingly. The top_cpuset is always |
2304 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in |
2305 | * order to make cpusets transparent (of no effect) on systems that are |
2306 | * actively using CPU hotplug but making no active use of cpusets. |
2307 | * |
2308 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
2309 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
2310 | * all descendants. |
2311 | * |
2312 | * Note that CPU offlining during suspend is ignored. We don't modify |
2313 | * cpusets across suspend/resume cycles at all. |
2314 | */ |
2315 | static void cpuset_hotplug_workfn(struct work_struct *work) |
2316 | { |
2317 | static cpumask_t new_cpus; |
2318 | static nodemask_t new_mems; |
2319 | bool cpus_updated, mems_updated; |
2320 | bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); |
2321 | |
2322 | mutex_lock(&cpuset_mutex); |
2323 | |
2324 | /* fetch the available cpus/mems and find out which changed how */ |
2325 | cpumask_copy(&new_cpus, cpu_active_mask); |
2326 | new_mems = node_states[N_MEMORY]; |
2327 | |
2328 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
2329 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); |
2330 | |
2331 | /* synchronize cpus_allowed to cpu_active_mask */ |
2332 | if (cpus_updated) { |
2333 | spin_lock_irq(&callback_lock); |
2334 | if (!on_dfl) |
2335 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); |
2336 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
2337 | spin_unlock_irq(&callback_lock); |
2338 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
2339 | } |
2340 | |
2341 | /* synchronize mems_allowed to N_MEMORY */ |
2342 | if (mems_updated) { |
2343 | spin_lock_irq(&callback_lock); |
2344 | if (!on_dfl) |
2345 | top_cpuset.mems_allowed = new_mems; |
2346 | top_cpuset.effective_mems = new_mems; |
2347 | spin_unlock_irq(&callback_lock); |
2348 | update_tasks_nodemask(&top_cpuset); |
2349 | } |
2350 | |
2351 | mutex_unlock(&cpuset_mutex); |
2352 | |
2353 | /* if cpus or mems changed, we need to propagate to descendants */ |
2354 | if (cpus_updated || mems_updated) { |
2355 | struct cpuset *cs; |
2356 | struct cgroup_subsys_state *pos_css; |
2357 | |
2358 | rcu_read_lock(); |
2359 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
2360 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
2361 | continue; |
2362 | rcu_read_unlock(); |
2363 | |
2364 | cpuset_hotplug_update_tasks(cs); |
2365 | |
2366 | rcu_read_lock(); |
2367 | css_put(&cs->css); |
2368 | } |
2369 | rcu_read_unlock(); |
2370 | } |
2371 | |
2372 | /* rebuild sched domains if cpus_allowed has changed */ |
2373 | if (cpus_updated || force_rebuild) { |
2374 | force_rebuild = false; |
2375 | rebuild_sched_domains(); |
2376 | } |
2377 | } |
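
/*
 * Hotplug handling in a nutshell (a sketch of the flow implemented above):
 *
 *   1. top_cpuset's effective masks are resynced to cpu_active_mask and
 *      node_states[N_MEMORY] (and, on legacy hierarchies, the allowed
 *      masks as well);
 *   2. every descendant cpuset is updated via
 *      cpuset_hotplug_update_tasks(), shrinking its effective masks or,
 *      on the default hierarchy, falling back to the parent's masks;
 *   3. sched domains are rebuilt if the set of usable CPUs changed or a
 *      rebuild was forced via cpuset_force_rebuild().
 */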
2378 | |
2379 | void cpuset_update_active_cpus(bool cpu_online) |
2380 | { |
2381 | /* |
2382 | * We're inside the cpu hotplug critical region, which usually nests |
2383 | * inside cgroup synchronization. Bounce actual hotplug processing |
2384 | * to a work item to avoid reverse locking order. |
2385 | * |
2386 | * We still need to do partition_sched_domains() synchronously; |
2387 | * otherwise, the scheduler will get confused and put tasks on the |
2388 | * dead CPU. Fall back to the default single domain. |
2389 | * cpuset_hotplug_workfn() will rebuild it as necessary. |
2390 | */ |
2391 | partition_sched_domains(1, NULL, NULL); |
2392 | schedule_work(&cpuset_hotplug_work); |
2393 | } |
2394 | |
2395 | void cpuset_wait_for_hotplug(void) |
2396 | { |
2397 | flush_work(&cpuset_hotplug_work); |
2398 | } |
2399 | |
2400 | /* |
2401 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
2402 | * Call this routine anytime after node_states[N_MEMORY] changes. |
2403 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
2404 | */ |
2405 | static int cpuset_track_online_nodes(struct notifier_block *self, |
2406 | unsigned long action, void *arg) |
2407 | { |
2408 | schedule_work(&cpuset_hotplug_work); |
2409 | return NOTIFY_OK; |
2410 | } |
2411 | |
2412 | static struct notifier_block cpuset_track_online_nodes_nb = { |
2413 | .notifier_call = cpuset_track_online_nodes, |
2414 | .priority = 10, /* ??! */ |
2415 | }; |
2416 | |
2417 | /** |
2418 | * cpuset_init_smp - initialize cpus_allowed |
2419 | * |
2420 | * Description: Finish top cpuset after the cpu and node maps are initialized |
2421 | */ |
2422 | void __init cpuset_init_smp(void) |
2423 | { |
2424 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
2425 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
2426 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
2427 | |
2428 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
2429 | top_cpuset.effective_mems = node_states[N_MEMORY]; |
2430 | |
2431 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
2432 | |
2433 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); |
2434 | BUG_ON(!cpuset_migrate_mm_wq); |
2435 | } |
2436 | |
2437 | /** |
2438 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. |
2439 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
2440 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
2441 | * |
2442 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
2443 | * attached to the specified @tsk. Guaranteed to return some non-empty |
2444 | * subset of cpu_online_mask, even if this means going outside the |
2445 | * task's cpuset. |
2446 | **/ |
2447 | |
2448 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
2449 | { |
2450 | unsigned long flags; |
2451 | |
2452 | spin_lock_irqsave(&callback_lock, flags); |
2453 | rcu_read_lock(); |
2454 | guarantee_online_cpus(task_cs(tsk), pmask); |
2455 | rcu_read_unlock(); |
2456 | spin_unlock_irqrestore(&callback_lock, flags); |
2457 | } |
2458 | |
2459 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
2460 | { |
2461 | rcu_read_lock(); |
2462 | do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus); |
2463 | rcu_read_unlock(); |
2464 | |
2465 | /* |
2466 | * We own tsk->cpus_allowed, nobody can change it under us. |
2467 | * |
2468 | * But we used cs && cs->cpus_allowed lockless and thus can |
2469 | * race with cgroup_attach_task() or update_cpumask() and get |
2470 | * the wrong tsk->cpus_allowed. However, both cases imply the |
2471 | * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() |
2472 | * which takes task_rq_lock(). |
2473 | * |
2474 | * If we are called after it dropped the lock we must see all |
2475 | * changes in task_cs()->cpus_allowed. Otherwise we can temporarily |
2476 | * set any mask even if it is not right from the task_cs() pov; |
2477 | * the pending set_cpus_allowed_ptr() will fix things. |
2478 | * |
2479 | * select_fallback_rq() will fix things up and set cpu_possible_mask |
2480 | * if required. |
2481 | */ |
2482 | } |
2483 | |
2484 | void __init cpuset_init_current_mems_allowed(void) |
2485 | { |
2486 | nodes_setall(current->mems_allowed); |
2487 | } |
2488 | |
2489 | /** |
2490 | * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset. |
2491 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. |
2492 | * |
2493 | * Description: Returns the nodemask_t mems_allowed of the cpuset |
2494 | * attached to the specified @tsk. Guaranteed to return some non-empty |
2495 | * subset of node_states[N_MEMORY], even if this means going outside the |
2496 | * task's cpuset. |
2497 | **/ |
2498 | |
2499 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) |
2500 | { |
2501 | nodemask_t mask; |
2502 | unsigned long flags; |
2503 | |
2504 | spin_lock_irqsave(&callback_lock, flags); |
2505 | rcu_read_lock(); |
2506 | guarantee_online_mems(task_cs(tsk), &mask); |
2507 | rcu_read_unlock(); |
2508 | spin_unlock_irqrestore(&callback_lock, flags); |
2509 | |
2510 | return mask; |
2511 | } |
2512 | |
2513 | /** |
2514 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed |
2515 | * @nodemask: the nodemask to be checked |
2516 | * |
2517 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
2518 | */ |
2519 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
2520 | { |
2521 | return nodes_intersects(*nodemask, current->mems_allowed); |
2522 | } |
2523 | |
2524 | /* |
2525 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
2526 | * mem_hardwall ancestor to the specified cpuset. Call holding |
2527 | * callback_lock. If no ancestor is mem_exclusive or mem_hardwall |
2528 | * (an unusual configuration), then returns the root cpuset. |
2529 | */ |
2530 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
2531 | { |
2532 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
2533 | cs = parent_cs(cs); |
2534 | return cs; |
2535 | } |
2536 | |
2537 | /** |
2538 | * cpuset_node_allowed - Can we allocate on a memory node? |
2539 | * @node: is this an allowed node? |
2540 | * @gfp_mask: memory allocation flags |
2541 | * |
2542 | * If we're in interrupt, yes, we can always allocate. If @node is set in |
2543 | * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this |
2544 | * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, |
2545 | * yes. If current has access to memory reserves due to TIF_MEMDIE, yes. |
2546 | * Otherwise, no. |
2547 | * |
2548 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, |
2549 | * and do not allow allocations outside the current task's cpuset |
2550 | * unless the task has been OOM killed and is marked TIF_MEMDIE. |
2551 | * GFP_KERNEL allocations are not so marked, so can escape to the |
2552 | * nearest enclosing hardwalled ancestor cpuset. |
2553 | * |
2554 | * Scanning up parent cpusets requires callback_lock. The |
2555 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
2556 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the |
2557 | * current task's mems_allowed came up empty on the first pass over |
2558 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the |
2559 | * cpuset are short of memory, might require taking the callback_lock. |
2560 | * |
2561 | * The first call here from mm/page_alloc:get_page_from_freelist() |
2562 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
2563 | * so no allocation on a node outside the cpuset is allowed (unless |
2564 | * in interrupt, of course). |
2565 | * |
2566 | * The second pass through get_page_from_freelist() doesn't even call |
2567 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() |
2568 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set |
2569 | * in alloc_flags. That logic and the checks below have the combined |
2570 | * effect that: |
2571 | * in_interrupt - any node ok (current task context irrelevant) |
2572 | * GFP_ATOMIC - any node ok |
2573 | * TIF_MEMDIE - any node ok |
2574 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
2575 | * GFP_USER - only nodes in the current task's mems_allowed ok. |
2576 | */ |
2577 | bool __cpuset_node_allowed(int node, gfp_t gfp_mask) |
2578 | { |
2579 | struct cpuset *cs; /* current cpuset ancestors */ |
2580 | int allowed; /* is allocation in zone z allowed? */ |
2581 | unsigned long flags; |
2582 | |
2583 | if (in_interrupt()) |
2584 | return true; |
2585 | if (node_isset(node, current->mems_allowed)) |
2586 | return true; |
2587 | /* |
2588 | * Allow tasks that have access to memory reserves because they have |
2589 | * been OOM killed to get memory anywhere. |
2590 | */ |
2591 | if (unlikely(test_thread_flag(TIF_MEMDIE))) |
2592 | return true; |
2593 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
2594 | return false; |
2595 | |
2596 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
2597 | return true; |
2598 | |
2599 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
2600 | spin_lock_irqsave(&callback_lock, flags); |
2601 | |
2602 | rcu_read_lock(); |
2603 | cs = nearest_hardwall_ancestor(task_cs(current)); |
2604 | allowed = node_isset(node, cs->mems_allowed); |
2605 | rcu_read_unlock(); |
2606 | |
2607 | spin_unlock_irqrestore(&callback_lock, flags); |
2608 | return allowed; |
2609 | } |
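
/*
 * Minimal caller sketch (an assumption about how the page allocator's
 * zone iteration consults this helper; see mm/page_alloc.c for the real
 * thing):
 *
 *	if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
 *	    !__cpuset_node_allowed(zone_to_nid(zone), gfp_mask))
 *		continue;	skip this zone, try the next one
 */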
2610 | |
2611 | /** |
2612 | * cpuset_mem_spread_node() - On which node to begin search for a file page |
2613 | * cpuset_slab_spread_node() - On which node to begin search for a slab page |
2614 | * |
2615 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for |
2616 | * tasks in a cpuset with is_spread_page or is_spread_slab set), |
2617 | * and if the memory allocation used cpuset_mem_spread_node() |
2618 | * to determine on which node to start looking, as it will for |
2619 | * certain page cache or slab cache pages such as those used for |
2620 | * file system buffers and inode caches, then instead of starting |
2621 | * on the local node to look for a free page, rather spread the |
2622 | * starting node around the task's mems_allowed nodes. |
2623 | * |
2624 | * We don't have to worry about the returned node being offline |
2625 | * because "it can't happen", and even if it did, it would be ok. |
2626 | * |
2627 | * The routines calling guarantee_online_mems() are careful to |
2628 | * only set nodes in task->mems_allowed that are online. So it |
2629 | * should not be possible for the following code to return an |
2630 | * offline node. But if it did, that would be ok, as this routine |
2631 | * is not returning the node where the allocation must be, only |
2632 | * the node where the search should start. The zonelist passed to |
2633 | * __alloc_pages() will include all nodes. If the slab allocator |
2634 | * is passed an offline node, it will fall back to the local node. |
2635 | * See kmem_cache_alloc_node(). |
2636 | */ |
2637 | |
2638 | static int cpuset_spread_node(int *rotor) |
2639 | { |
2640 | return *rotor = next_node_in(*rotor, current->mems_allowed); |
2641 | } |
2642 | |
2643 | int cpuset_mem_spread_node(void) |
2644 | { |
2645 | if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) |
2646 | current->cpuset_mem_spread_rotor = |
2647 | node_random(&current->mems_allowed); |
2648 | |
2649 | return cpuset_spread_node(&current->cpuset_mem_spread_rotor); |
2650 | } |
2651 | |
2652 | int cpuset_slab_spread_node(void) |
2653 | { |
2654 | if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) |
2655 | current->cpuset_slab_spread_rotor = |
2656 | node_random(&current->mems_allowed); |
2657 | |
2658 | return cpuset_spread_node(&current->cpuset_slab_spread_rotor); |
2659 | } |
2660 | |
2661 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); |
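
/*
 * Example of the rotor behaviour (hypothetical values): with
 * current->mems_allowed = { 0, 2, 3 } and the rotor seeded at node 0 by
 * node_random(), successive cpuset_mem_spread_node() calls return
 * 2, 3, 0, 2, 3, 0, ... - next_node_in() simply wraps around the
 * allowed mask.
 */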
2662 | |
2663 | /** |
2664 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
2665 | * @tsk1: pointer to task_struct of some task. |
2666 | * @tsk2: pointer to task_struct of some other task. |
2667 | * |
2668 | * Description: Return true if @tsk1's mems_allowed intersects the |
2669 | * mems_allowed of @tsk2. Used by the OOM killer to determine if |
2670 | * one of the task's memory usage might impact the memory available |
2671 | * to the other. |
2672 | **/ |
2673 | |
2674 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
2675 | const struct task_struct *tsk2) |
2676 | { |
2677 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
2678 | } |
2679 | |
2680 | /** |
2681 | * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed |
2682 | * |
2683 | * Description: Prints current's name, cpuset name, and cached copy of its |
2684 | * mems_allowed to the kernel log. |
2685 | */ |
2686 | void cpuset_print_current_mems_allowed(void) |
2687 | { |
2688 | struct cgroup *cgrp; |
2689 | |
2690 | rcu_read_lock(); |
2691 | |
2692 | cgrp = task_cs(current)->css.cgroup; |
2693 | pr_info("%s cpuset=", current->comm); |
2694 | pr_cont_cgroup_name(cgrp); |
2695 | pr_cont(" mems_allowed=%*pbl\n", |
2696 | nodemask_pr_args(&current->mems_allowed)); |
2697 | |
2698 | rcu_read_unlock(); |
2699 | } |
2700 | |
2701 | /* |
2702 | * Collection of memory_pressure is suppressed unless |
2703 | * this flag is enabled by writing "1" to the special |
2704 | * cpuset file 'memory_pressure_enabled' in the root cpuset. |
2705 | */ |
2706 | |
2707 | int cpuset_memory_pressure_enabled __read_mostly; |
2708 | |
2709 | /** |
2710 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. |
2711 | * |
2712 | * Keep a running average of the rate of synchronous (direct) |
2713 | * page reclaim efforts initiated by tasks in each cpuset. |
2714 | * |
2715 | * This represents the rate at which some task in the cpuset |
2716 | * ran low on memory on all nodes it was allowed to use, and |
2717 | * had to enter the kernel's page reclaim code in an effort to |
2718 | * create more free memory by tossing clean pages or swapping |
2719 | * or writing dirty pages. |
2720 | * |
2721 | * Display to user space in the per-cpuset read-only file |
2722 | * "memory_pressure". Value displayed is an integer |
2723 | * representing the recent rate of entry into the synchronous |
2724 | * (direct) page reclaim by any task attached to the cpuset. |
2725 | **/ |
2726 | |
2727 | void __cpuset_memory_pressure_bump(void) |
2728 | { |
2729 | rcu_read_lock(); |
2730 | fmeter_markevent(&task_cs(current)->fmeter); |
2731 | rcu_read_unlock(); |
2732 | } |
2733 | |
2734 | #ifdef CONFIG_PROC_PID_CPUSET |
2735 | /* |
2736 | * proc_cpuset_show() |
2737 | * - Print task's cpuset path into seq_file. |
2738 | * - Used for /proc/<pid>/cpuset. |
2739 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
2740 | * doesn't really matter if tsk->cpuset changes after we read it, |
2741 | * and we take cpuset_mutex, keeping cpuset_attach() from changing it |
2742 | * anyway. |
2743 | */ |
2744 | int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, |
2745 | struct pid *pid, struct task_struct *tsk) |
2746 | { |
2747 | char *buf; |
2748 | struct cgroup_subsys_state *css; |
2749 | int retval; |
2750 | |
2751 | retval = -ENOMEM; |
2752 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
2753 | if (!buf) |
2754 | goto out; |
2755 | |
2756 | css = task_get_css(tsk, cpuset_cgrp_id); |
2757 | retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, |
2758 | current->nsproxy->cgroup_ns); |
2759 | css_put(css); |
2760 | if (retval >= PATH_MAX) |
2761 | retval = -ENAMETOOLONG; |
2762 | if (retval < 0) |
2763 | goto out_free; |
2764 | seq_puts(m, buf); |
2765 | seq_putc(m, '\n'); |
2766 | retval = 0; |
2767 | out_free: |
2768 | kfree(buf); |
2769 | out: |
2770 | return retval; |
2771 | } |
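
/*
 * Example /proc/<pid>/cpuset contents (illustrative; "grp" is a
 * hypothetical cpuset the task is attached to):
 *
 *   # cat /proc/self/cpuset
 *   /grp
 *
 * A task in the root cpuset shows just "/".
 */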
2772 | #endif /* CONFIG_PROC_PID_CPUSET */ |
2773 | |
2774 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
2775 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
2776 | { |
2777 | seq_printf(m, "Mems_allowed:\t%*pb\n", |
2778 | nodemask_pr_args(&task->mems_allowed)); |
2779 | seq_printf(m, "Mems_allowed_list:\t%*pbl\n", |
2780 | nodemask_pr_args(&task->mems_allowed)); |
2781 | } |
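
/*
 * Example /proc/<pid>/status lines emitted above (the hex mask width
 * depends on MAX_NUMNODES; values shown are illustrative):
 *
 *   Mems_allowed:	00000000,00000003
 *   Mems_allowed_list:	0-1
 */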
2782 |