/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
 * Copyright (C) 2006 Google, Inc
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * 2006 Rework by Paul Menage to use generic cgroups
 * 2008 Rework of the scheduler domains and CPU hotplug handling
 *      by Max Krasnyansky
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
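
	/*
	 * Worked example for the default-hierarchy rule above (an
	 * illustration, not code): if the parent's effective_cpus is
	 * 0-3 and 2-5 is written to this cpuset's cpuset.cpus, the
	 * resulting effective_cpus is 2-3, the intersection of the
	 * two; were the intersection empty, the parent's 0-3 would
	 * be inherited instead.
	 */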

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	cpumask_var_t cpus_requested;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * These are the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset. Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs. Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs. Must be used
 * with RCU read locked. The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree. @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
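
/*
 * A usage sketch (hypothetical caller; visit() is a placeholder): walk
 * every online descendant, pruning any subtree whose root has no CPUs,
 * the same pattern update_domain_attr_tree() below relies on:
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 *		if (cpumask_empty(cp->cpus_allowed)) {
 *			pos_css = css_rightmost_descendant(pos_css);
 *			continue;
 *		}
 *		visit(cp);
 *	}
 *	rcu_read_unlock();
 */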

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock. We also require taking task_lock() when dereferencing a
 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets. If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets. It can perform various checks on the cpuset structure
 * first, knowing nothing will change. It can also allocate memory while
 * just holding cpuset_mutex. While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task; we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);
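
/*
 * A minimal sketch of the write-side pattern the rules above imply
 * (the exact irq handling varies by call site):
 *
 *	mutex_lock(&cpuset_mutex);
 *	// validate the change, allocate any memory needed
 *	spin_lock_irq(&callback_lock);
 *	// publish the new cpus_allowed / mems_allowed values
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */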

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};
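
/*
 * In other words, from userspace,
 *
 *	mount -t cpuset cpuset /dev/cpuset
 *
 * behaves like
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent \
 *		cgroup /dev/cpuset
 */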

/*
 * Return in pmask the portion of a cpuset's effective_cpus that
 * are online. If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and the cpu hotplug notifier. But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's effective_mems that
 * are online, with memory. If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems. The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set. Call holding cpuset_mutex.
 */
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
		goto free_cs;
	if (!alloc_cpumask_var(&trial->cpus_requested, GFP_KERNEL))
		goto free_allowed;
	if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
		goto free_requested;

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;

free_requested:
	free_cpumask_var(trial->cpus_requested);
free_allowed:
	free_cpumask_var(trial->cpus_allowed);
free_cs:
	kfree(trial);
	return NULL;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->effective_cpus);
	free_cpumask_var(trial->cpus_requested);
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}
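
/*
 * Typical use of a trial cpuset, a sketch of the pattern the cpuset
 * update handlers follow: duplicate the cpuset, apply the requested
 * change to the copy, and commit it only if validate_change() accepts
 * the result.
 *
 *	trialcs = alloc_trial_cpuset(cs);
 *	if (!trialcs)
 *		return -ENOMEM;
 *	// ... modify trialcs (its cpus_requested, flags, etc.) ...
 *	retval = validate_change(cs, trialcs);
 *	if (!retval) {
 *		// ... copy the validated values back into cs ...
 *	}
 *	free_trial_cpuset(trialcs);
 *	return retval;
 */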

/*
 * validate_change() - Used to validate that any proposed cpuset change
 * follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid? Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset. Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */
static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
	    !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective_cpus masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The two key local variables below are:
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, filled by a top-down scan
 *	   of all cpusets; for our purposes, rebuilding the scheduler's
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *	   It gives convenient iterative access to the subsequent code
 *	   that finds the best partition, i.e. the set of domains
 *	   (subsets) of CPUs such that the cpus_allowed of every cpuset
 *	   marked is_sched_load_balance is a subset of one of these
 *	   domains, while there are as many such domains as possible,
 *	   each as small as possible.
 *  doms - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed).
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but don't have the same 'pn' partition number,
 *	and merges them into the same partition. It keeps looping
 *	on the 'restart' label until it can no longer find any such
 *	pairs.
 *
 *	The union of the cpus_allowed masks from the set of all
 *	cpusets having the same 'pn' value then forms one element
 *	of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
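 *
 *	A small worked example: three load-balanced cpusets with
 *	effective CPUs A=0-1, B=1-2 and C=4-5 yield ndoms == 2,
 *	since A and B overlap and are merged into the single domain
 *	0-2 while C forms its own domain 4-5.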
| 632 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 633 | static int generate_sched_domains(cpumask_var_t **domains, |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 634 | struct sched_domain_attr **attributes) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 635 | { |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 636 | struct cpuset *cp; /* scans q */ |
| 637 | struct cpuset **csa; /* array of all cpuset ptrs */ |
| 638 | int csn; /* how many cpuset ptrs in csa so far */ |
| 639 | int i, j, k; /* indices for partition finding loops */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 640 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 641 | cpumask_var_t non_isolated_cpus; /* load balanced CPUs */ |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 642 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
Ingo Molnar | 1583715 | 2008-11-25 10:27:49 +0100 | [diff] [blame] | 643 | int ndoms = 0; /* number of sched domains in result */ |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 644 | int nslot; /* next empty doms[] struct cpumask slot */ |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 645 | struct cgroup_subsys_state *pos_css; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 646 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 647 | doms = NULL; |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 648 | dattr = NULL; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 649 | csa = NULL; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 650 | |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 651 | if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL)) |
| 652 | goto done; |
| 653 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
| 654 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 655 | /* Special case for the 99% of systems with one, full, sched domain */ |
| 656 | if (is_sched_load_balance(&top_cpuset)) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 657 | ndoms = 1; |
| 658 | doms = alloc_sched_domains(ndoms); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 659 | if (!doms) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 660 | goto done; |
| 661 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 662 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
| 663 | if (dattr) { |
| 664 | *dattr = SD_ATTR_INIT; |
Li Zefan | 93a6557 | 2008-07-29 22:33:23 -0700 | [diff] [blame] | 665 | update_domain_attr_tree(dattr, &top_cpuset); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 666 | } |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 667 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
| 668 | non_isolated_cpus); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 669 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 670 | goto done; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 671 | } |
| 672 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 673 | csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 674 | if (!csa) |
| 675 | goto done; |
| 676 | csn = 0; |
| 677 | |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 678 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 679 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
Tejun Heo | bd8815a | 2013-08-08 20:11:27 -0400 | [diff] [blame] | 680 | if (cp == &top_cpuset) |
| 681 | continue; |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 682 | /* |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 683 | * Continue traversing beyond @cp iff @cp has some CPUs and |
| 684 | * isn't load balancing. The former is obvious. The |
| 685 | * latter: All child cpusets contain a subset of the |
| 686 | * parent's cpus, so just skip them, and then we call |
| 687 | * update_domain_attr_tree() to calc relax_domain_level of |
| 688 | * the corresponding sched domain. |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 689 | */ |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 690 | if (!cpumask_empty(cp->cpus_allowed) && |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 691 | !(is_sched_load_balance(cp) && |
| 692 | cpumask_intersects(cp->cpus_allowed, non_isolated_cpus))) |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 693 | continue; |
Lai Jiangshan | 489a539 | 2008-07-25 01:47:23 -0700 | [diff] [blame] | 694 | |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 695 | if (is_sched_load_balance(cp)) |
| 696 | csa[csn++] = cp; |
| 697 | |
| 698 | /* skip @cp's subtree */ |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 699 | pos_css = css_rightmost_descendant(pos_css); |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 700 | } |
| 701 | rcu_read_unlock(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 702 | |
| 703 | for (i = 0; i < csn; i++) |
| 704 | csa[i]->pn = i; |
| 705 | ndoms = csn; |
| 706 | |
| 707 | restart: |
| 708 | /* Find the best partition (set of sched domains) */ |
| 709 | for (i = 0; i < csn; i++) { |
| 710 | struct cpuset *a = csa[i]; |
| 711 | int apn = a->pn; |
| 712 | |
| 713 | for (j = 0; j < csn; j++) { |
| 714 | struct cpuset *b = csa[j]; |
| 715 | int bpn = b->pn; |
| 716 | |
| 717 | if (apn != bpn && cpusets_overlap(a, b)) { |
| 718 | for (k = 0; k < csn; k++) { |
| 719 | struct cpuset *c = csa[k]; |
| 720 | |
| 721 | if (c->pn == bpn) |
| 722 | c->pn = apn; |
| 723 | } |
| 724 | ndoms--; /* one less element */ |
| 725 | goto restart; |
| 726 | } |
| 727 | } |
| 728 | } |
| 729 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 730 | /* |
| 731 | * Now we know how many domains to create. |
| 732 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. |
| 733 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 734 | doms = alloc_sched_domains(ndoms); |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 735 | if (!doms) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 736 | goto done; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 737 | |
| 738 | /* |
| 739 | * The rest of the code, including the scheduler, can deal with |
| 740 | * dattr==NULL case. No need to abort if alloc fails. |
| 741 | */ |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 742 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 743 | |
| 744 | for (nslot = 0, i = 0; i < csn; i++) { |
| 745 | struct cpuset *a = csa[i]; |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 746 | struct cpumask *dp; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 747 | int apn = a->pn; |
| 748 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 749 | if (apn < 0) { |
| 750 | /* Skip completed partitions */ |
| 751 | continue; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 752 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 753 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 754 | dp = doms[nslot]; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 755 | |
| 756 | if (nslot == ndoms) { |
| 757 | static int warnings = 10; |
| 758 | if (warnings) { |
Fabian Frederick | 12d3089 | 2014-05-05 19:49:00 +0200 | [diff] [blame] | 759 | pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", |
| 760 | nslot, ndoms, csn, i, apn); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 761 | warnings--; |
| 762 | } |
| 763 | continue; |
| 764 | } |
| 765 | |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 766 | cpumask_clear(dp); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 767 | if (dattr) |
| 768 | *(dattr + nslot) = SD_ATTR_INIT; |
| 769 | for (j = i; j < csn; j++) { |
| 770 | struct cpuset *b = csa[j]; |
| 771 | |
| 772 | if (apn == b->pn) { |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 773 | cpumask_or(dp, dp, b->effective_cpus); |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 774 | cpumask_and(dp, dp, non_isolated_cpus); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 775 | if (dattr) |
| 776 | update_domain_attr_tree(dattr + nslot, b); |
| 777 | |
| 778 | /* Done with this partition */ |
| 779 | b->pn = -1; |
| 780 | } |
| 781 | } |
| 782 | nslot++; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 783 | } |
| 784 | BUG_ON(nslot != ndoms); |
| 785 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 786 | done: |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 787 | free_cpumask_var(non_isolated_cpus); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 788 | kfree(csa); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 789 | |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 790 | /* |
| 791 | * Fallback to the default domain if kmalloc() failed. |
| 792 | * See comments in partition_sched_domains(). |
| 793 | */ |
| 794 | if (doms == NULL) |
| 795 | ndoms = 1; |
| 796 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 797 | *domains = doms; |
| 798 | *attributes = dattr; |
| 799 | return ndoms; |
| 800 | } |
| 801 | |
| 802 | /* |
| 803 | * Rebuild scheduler domains. |
| 804 | * |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 805 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
| 806 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset |
| 807 | * which has that flag enabled, or if any cpuset with a non-empty |
| 808 | * 'cpus' is removed, then call this routine to rebuild the |
| 809 | * scheduler's dynamic sched domains. |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 810 | * |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 811 | */ |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 812 | static void rebuild_sched_domains_unlocked(void) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 813 | { |
| 814 | struct sched_domain_attr *attr; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 815 | cpumask_var_t *doms; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 816 | int ndoms; |
| 817 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 818 | cpu_hotplug_mutex_held(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 819 | lockdep_assert_held(&cpuset_mutex); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 820 | |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 821 | /* |
| 822 | * We have raced with CPU hotplug. Don't do anything to avoid |
| 823 | * passing doms with offlined cpu to partition_sched_domains(). |
| 824 | * Anyways, hotplug work item will rebuild sched domains. |
| 825 | */ |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 826 | if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 827 | return; |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 828 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 829 | /* Generate domain masks and attrs */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 830 | ndoms = generate_sched_domains(&doms, &attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 831 | |
| 832 | /* Have scheduler rebuild the domains */ |
| 833 | partition_sched_domains(ndoms, doms, attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 834 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 835 | #else /* !CONFIG_SMP */ |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 836 | static void rebuild_sched_domains_unlocked(void) |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 837 | { |
| 838 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 839 | #endif /* CONFIG_SMP */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 840 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 841 | void rebuild_sched_domains(void) |
| 842 | { |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 843 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 844 | mutex_lock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 845 | rebuild_sched_domains_unlocked(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 846 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 847 | put_online_cpus(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 848 | } |
| 849 | |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 850 | /** |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 851 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. |
| 852 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed |
| 853 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 854 | * Iterate through each task of @cs updating its cpus_allowed to the |
| 855 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 856 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 857 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 858 | static void update_tasks_cpumask(struct cpuset *cs) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 859 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 860 | struct css_task_iter it; |
| 861 | struct task_struct *task; |
| 862 | |
| 863 | css_task_iter_start(&cs->css, &it); |
| 864 | while ((task = css_task_iter_next(&it))) |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 865 | set_cpus_allowed_ptr(task, cs->effective_cpus); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 866 | css_task_iter_end(&it); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 867 | } |
| 868 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 869 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 870 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
| 871 | * @cs: the cpuset to consider |
| 872 | * @new_cpus: temp variable for calculating new effective_cpus |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 873 | * |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 874 | * When congifured cpumask is changed, the effective cpumasks of this cpuset |
| 875 | * and all its descendants need to be updated. |
| 876 | * |
| 877 | * On the legacy hierarchy, effective_cpus will be the same as cpus_allowed. |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 878 | * Called with cpuset_mutex held. |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 879 | */ |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 880 | static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 881 | { |
| 882 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 883 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 884 | bool need_rebuild_sched_domains = false; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 885 | |
| 886 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 887 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 888 | struct cpuset *parent = parent_cs(cp); |
| 889 | |
| 890 | cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus); |
| 891 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 892 | /* |
| 893 | * If it becomes empty, inherit the effective mask of the |
| 894 | * parent, which is guaranteed to have some CPUs. |
| 895 | */ |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 896 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
| 897 | cpumask_empty(new_cpus)) |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 898 | cpumask_copy(new_cpus, parent->effective_cpus); |
| 899 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 900 | /* Skip the whole subtree if the cpumask remains the same. */ |
| 901 | if (cpumask_equal(new_cpus, cp->effective_cpus)) { |
| 902 | pos_css = css_rightmost_descendant(pos_css); |
| 903 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 904 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 905 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 906 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 907 | continue; |
| 908 | rcu_read_unlock(); |
| 909 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 910 | spin_lock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 911 | cpumask_copy(cp->effective_cpus, new_cpus); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 912 | spin_unlock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 913 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 914 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 915 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
| 916 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 917 | update_tasks_cpumask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 918 | |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 919 | /* |
| 920 | * If the effective cpumask of any non-empty cpuset is changed, |
| 921 | * we need to rebuild sched domains. |
| 922 | */ |
| 923 | if (!cpumask_empty(cp->cpus_allowed) && |
| 924 | is_sched_load_balance(cp)) |
| 925 | need_rebuild_sched_domains = true; |
| 926 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 927 | rcu_read_lock(); |
| 928 | css_put(&cp->css); |
| 929 | } |
| 930 | rcu_read_unlock(); |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 931 | |
| 932 | if (need_rebuild_sched_domains) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 933 | rebuild_sched_domains_unlocked(); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 934 | } |
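| | |
| | /* |
| |  * Worked example of the walk above (illustrative): on the default |
| |  * hierarchy, take cpusets A/B/C with A.effective_cpus = {0-3}. If |
| |  * B.cpus_allowed shrinks to {2}, then B.effective_cpus becomes {2}. |
| |  * If C.cpus_allowed = {0-1}, its intersection with B's effective mask |
| |  * is empty, so C inherits B.effective_cpus = {2} from its parent. Any |
| |  * subtree whose effective mask did not change is skipped wholesale via |
| |  * css_rightmost_descendant(). |
| |  */ |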
| 935 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 936 | /** |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 937 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it |
| 938 | * @cs: the cpuset to consider |
Fabian Frederick | fc34ac1 | 2014-05-05 19:46:55 +0200 | [diff] [blame] | 939 | * @trialcs: trial cpuset |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 940 | * @buf: buffer of cpu numbers written to this cpuset |
| 941 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 942 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
| 943 | const char *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | { |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 945 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 946 | |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 947 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 948 | if (cs == &top_cpuset) |
| 949 | return -EACCES; |
| 950 | |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 951 | /* |
Paul Jackson | c8d9c90 | 2008-02-07 00:14:46 -0800 | [diff] [blame] | 952 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 953 | * Since cpulist_parse() fails on an empty mask, we special case |
| 954 | * that parsing. The validate_change() call ensures that cpusets |
| 955 | * with tasks have cpus. |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 956 | */ |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 957 | if (!*buf) { |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 958 | cpumask_clear(trialcs->cpus_allowed); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 959 | } else { |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 960 | retval = cpulist_parse(buf, trialcs->cpus_requested); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 961 | if (retval < 0) |
| 962 | return retval; |
Lai Jiangshan | 3734074 | 2008-06-05 22:46:32 -0700 | [diff] [blame] | 963 | |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 964 | if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask)) |
Lai Jiangshan | 3734074 | 2008-06-05 22:46:32 -0700 | [diff] [blame] | 965 | return -EINVAL; |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 966 | |
| 967 | cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 968 | } |
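| | |
| | 	/* |
| | 	 * Example of the parse above (illustrative): writing "0-3,6" |
| | 	 * yields cpus_requested = {0,1,2,3,6}. Every requested CPU must |
| | 	 * be present, but only currently active ones are kept in |
| | 	 * cpus_allowed = cpus_requested & cpu_active_mask. |
| | 	 */ |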
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 969 | |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 970 | /* Nothing to do if the cpus didn't change */ |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 971 | if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 972 | return 0; |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 973 | |
Li Zefan | a73456f | 2013-06-05 17:15:59 +0800 | [diff] [blame] | 974 | retval = validate_change(cs, trialcs); |
| 975 | if (retval < 0) |
| 976 | return retval; |
| 977 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 978 | spin_lock_irq(&callback_lock); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 979 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 980 | cpumask_copy(cs->cpus_requested, trialcs->cpus_requested); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 981 | spin_unlock_irq(&callback_lock); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 982 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 983 | /* use trialcs->cpus_allowed as a temp variable */ |
| 984 | update_cpumasks_hier(cs, trialcs->cpus_allowed); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 985 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 986 | } |
| 987 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 988 | /* |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 989 | * Migrate memory region from one set of nodes to another. This is |
| 990 | * performed asynchronously as it can be called from the process migration path |
| 991 | * holding locks involved in process management. All mm migrations are |
| 992 | * performed in the queued order and can be waited for by flushing |
| 993 | * cpuset_migrate_mm_wq. |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 994 | */ |
| 995 | |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 996 | struct cpuset_migrate_mm_work { |
| 997 | struct work_struct work; |
| 998 | struct mm_struct *mm; |
| 999 | nodemask_t from; |
| 1000 | nodemask_t to; |
| 1001 | }; |
| 1002 | |
| 1003 | static void cpuset_migrate_mm_workfn(struct work_struct *work) |
| 1004 | { |
| 1005 | struct cpuset_migrate_mm_work *mwork = |
| 1006 | container_of(work, struct cpuset_migrate_mm_work, work); |
| 1007 | |
| 1008 | /* on a wq worker, no need to worry about %current's mems_allowed */ |
| 1009 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); |
| 1010 | mmput(mwork->mm); |
| 1011 | kfree(mwork); |
| 1012 | } |
| 1013 | |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1014 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
| 1015 | const nodemask_t *to) |
| 1016 | { |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1017 | struct cpuset_migrate_mm_work *mwork; |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1018 | |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1019 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
| 1020 | if (mwork) { |
| 1021 | mwork->mm = mm; |
| 1022 | mwork->from = *from; |
| 1023 | mwork->to = *to; |
| 1024 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); |
| 1025 | queue_work(cpuset_migrate_mm_wq, &mwork->work); |
| 1026 | } else { |
| 1027 | mmput(mm); |
| 1028 | } |
| 1029 | } |
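| | |
| | /* |
| |  * Note on the reference handoff above (illustrative): the caller's mm |
| |  * reference is always consumed -- either by the queued work item's |
| |  * mmput() after migration, or immediately on allocation failure -- so |
| |  * callers must not mmput() an mm they have passed in. |
| |  */ |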
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1030 | |
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 1031 | static void cpuset_post_attach(void) |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1032 | { |
| 1033 | flush_workqueue(cpuset_migrate_mm_wq); |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1034 | } |
| 1035 | |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1036 | /* |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1037 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
| 1038 | * @tsk: the task to change |
| 1039 | * @newmems: new nodes that the task will be set |
| 1040 | * |
| 1041 | * In order to avoid seeing no nodes if the old and new nodes are disjoint, |
| 1042 | * we structure updates as setting all new allowed nodes, then clearing newly |
| 1043 | * disallowed ones. |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1044 | */ |
| 1045 | static void cpuset_change_task_nodemask(struct task_struct *tsk, |
| 1046 | nodemask_t *newmems) |
| 1047 | { |
David Rientjes | b246272 | 2011-12-19 17:11:52 -0800 | [diff] [blame] | 1048 | bool need_loop; |
David Rientjes | 89e8a24 | 2011-11-02 13:38:39 -0700 | [diff] [blame] | 1049 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1050 | task_lock(tsk); |
David Rientjes | b246272 | 2011-12-19 17:11:52 -0800 | [diff] [blame] | 1051 | /* |
| 1052 | * Determine if a loop is necessary if another thread is doing |
Mel Gorman | d26914d | 2014-04-03 14:47:24 -0700 | [diff] [blame] | 1053 | * read_mems_allowed_begin(). If at least one node remains unchanged and |
David Rientjes | b246272 | 2011-12-19 17:11:52 -0800 | [diff] [blame] | 1054 | * tsk does not have a mempolicy, then an empty nodemask will not be |
| 1055 | * possible when mems_allowed is larger than a word. |
| 1056 | */ |
| 1057 | need_loop = task_has_mempolicy(tsk) || |
| 1058 | !nodes_intersects(*newmems, tsk->mems_allowed); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1059 | |
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1060 | if (need_loop) { |
| 1061 | local_irq_disable(); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1062 | write_seqcount_begin(&tsk->mems_allowed_seq); |
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1063 | } |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1064 | |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1065 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1066 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1); |
| 1067 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1068 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1069 | tsk->mems_allowed = *newmems; |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1070 | |
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1071 | if (need_loop) { |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1072 | write_seqcount_end(&tsk->mems_allowed_seq); |
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1073 | local_irq_enable(); |
| 1074 | } |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1075 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1076 | task_unlock(tsk); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1077 | } |
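| | |
| | /* |
| |  * Worked example of the update ordering above (illustrative): moving a |
| |  * task from mems_allowed = {0} to newmems = {1}: |
| |  * |
| |  *	nodes_or():	mems_allowed = {0,1}	/* old node still visible */ |
| |  *	assignment:	mems_allowed = {1}	/* disallowed node dropped */ |
| |  * |
| |  * The mask is never empty in between, so a concurrent allocator reading |
| |  * under read_mems_allowed_begin() always sees at least one valid node. |
| |  */ |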
| 1078 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1079 | static void *cpuset_being_rebound; |
| 1080 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1081 | /** |
| 1082 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
| 1083 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1084 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1085 | * Iterate through each task of @cs updating its mems_allowed to the |
| 1086 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 1087 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1088 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1089 | static void update_tasks_nodemask(struct cpuset *cs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | { |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1091 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1092 | struct css_task_iter it; |
| 1093 | struct task_struct *task; |
Paul Jackson | 59dac16 | 2006-01-08 01:01:52 -0800 | [diff] [blame] | 1094 | |
Lee Schermerhorn | 846a16b | 2008-04-28 02:13:09 -0700 | [diff] [blame] | 1095 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1096 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1097 | guarantee_online_mems(cs, &newmems); |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1098 | |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1099 | /* |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1100 | * The mpol_rebind_mm() call takes mmap_sem, which we couldn't |
| 1101 | * take while holding tasklist_lock. Forks can happen - the |
| 1102 | * mpol_dup() cpuset_being_rebound check will catch such forks, |
| 1103 | * and rebind their vma mempolicies too. Because we still hold |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1104 | * the global cpuset_mutex, we know that no other rebind effort |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1105 | * will be contending for the global variable cpuset_being_rebound. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1106 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
Paul Jackson | 04c19fa | 2006-01-08 01:02:00 -0800 | [diff] [blame] | 1107 | * is idempotent. Also migrate pages in each mm to new nodes. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1108 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1109 | css_task_iter_start(&cs->css, &it); |
| 1110 | while ((task = css_task_iter_next(&it))) { |
| 1111 | struct mm_struct *mm; |
| 1112 | bool migrate; |
| 1113 | |
| 1114 | cpuset_change_task_nodemask(task, &newmems); |
| 1115 | |
| 1116 | mm = get_task_mm(task); |
| 1117 | if (!mm) |
| 1118 | continue; |
| 1119 | |
| 1120 | migrate = is_memory_migrate(cs); |
| 1121 | |
| 1122 | mpol_rebind_mm(mm, &cs->mems_allowed); |
| 1123 | if (migrate) |
| 1124 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1125 | else |
| 1126 | mmput(mm); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1127 | } |
| 1128 | css_task_iter_end(&it); |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1129 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1130 | /* |
| 1131 | * All the tasks' nodemasks have been updated, update |
| 1132 | * cs->old_mems_allowed. |
| 1133 | */ |
| 1134 | cs->old_mems_allowed = newmems; |
| 1135 | |
Paul Menage | 2df167a | 2008-02-07 00:14:45 -0800 | [diff] [blame] | 1136 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1137 | cpuset_being_rebound = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | } |
| 1139 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1140 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1141 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
| 1142 | * @cs: the cpuset to consider |
| 1143 | * @new_mems: a temp variable for calculating new effective_mems |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1144 | * |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1145 | * When configured nodemask is changed, the effective nodemasks of this cpuset |
| 1146 | * and all its descendants need to be updated. |
| 1147 | * |
| 1148 | * On the legacy hierarchy, effective_mems will be the same as mems_allowed. |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1149 | * |
| 1150 | * Called with cpuset_mutex held |
| 1151 | */ |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1152 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1153 | { |
| 1154 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 1155 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1156 | |
| 1157 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1158 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 1159 | struct cpuset *parent = parent_cs(cp); |
| 1160 | |
| 1161 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); |
| 1162 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1163 | /* |
| 1164 | * If it becomes empty, inherit the effective mask of the |
| 1165 | * parent, which is guaranteed to have some MEMs. |
| 1166 | */ |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 1167 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
| 1168 | nodes_empty(*new_mems)) |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1169 | *new_mems = parent->effective_mems; |
| 1170 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1171 | /* Skip the whole subtree if the nodemask remains the same. */ |
| 1172 | if (nodes_equal(*new_mems, cp->effective_mems)) { |
| 1173 | pos_css = css_rightmost_descendant(pos_css); |
| 1174 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1175 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1176 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 1177 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1178 | continue; |
| 1179 | rcu_read_unlock(); |
| 1180 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1181 | spin_lock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1182 | cp->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1183 | spin_unlock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1184 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 1185 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
Li Zefan | a138126 | 2014-07-30 15:07:13 +0800 | [diff] [blame] | 1186 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1187 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1188 | update_tasks_nodemask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1189 | |
| 1190 | rcu_read_lock(); |
| 1191 | css_put(&cp->css); |
| 1192 | } |
| 1193 | rcu_read_unlock(); |
| 1194 | } |
| 1195 | |
| 1196 | /* |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1197 | * Handle user request to change the 'mems' memory placement |
| 1198 | * of a cpuset. Needs to validate the request, update the |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1199 | * cpuset's mems_allowed, and for each task in the cpuset, |
| 1200 | * update mems_allowed and rebind the task's mempolicy and any vma |
| 1201 | * mempolicies, and if the cpuset is marked 'memory_migrate', |
| 1202 | * migrate the task's pages to the new memory. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1203 | * |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1204 | * Call with cpuset_mutex held. May take callback_lock during call. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1205 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
| 1206 | * lock each such tasks mm->mmap_sem, scan its vma's and rebind |
| 1207 | * their mempolicies to the cpusets new mems_allowed. |
| 1208 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1209 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
| 1210 | const char *buf) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1211 | { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1212 | int retval; |
| 1213 | |
| 1214 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 1215 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1216 | * it's read-only. |
| 1217 | */ |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1218 | if (cs == &top_cpuset) { |
| 1219 | retval = -EACCES; |
| 1220 | goto done; |
| 1221 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1222 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1223 | /* |
| 1224 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. |
| 1225 | * Since nodelist_parse() fails on an empty mask, we special case |
| 1226 | * that parsing. The validate_change() call ensures that cpusets |
| 1227 | * with tasks have memory. |
| 1228 | */ |
| 1229 | if (!*buf) { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1230 | nodes_clear(trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1231 | } else { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1232 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1233 | if (retval < 0) |
| 1234 | goto done; |
| 1235 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1236 | if (!nodes_subset(trialcs->mems_allowed, |
Li Zefan | 5d8ba82 | 2014-07-09 16:49:12 +0800 | [diff] [blame] | 1237 | top_cpuset.mems_allowed)) { |
| 1238 | retval = -EINVAL; |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1239 | goto done; |
| 1240 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1241 | } |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1242 | |
| 1243 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1244 | retval = 0; /* Too easy - nothing to do */ |
| 1245 | goto done; |
| 1246 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1247 | retval = validate_change(cs, trialcs); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1248 | if (retval < 0) |
| 1249 | goto done; |
| 1250 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1251 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1252 | cs->mems_allowed = trialcs->mems_allowed; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1253 | spin_unlock_irq(&callback_lock); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1254 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1255 | /* use trialcs->mems_allowed as a temp variable */ |
Alban Crequy | 24ee3cf | 2015-08-06 16:21:05 +0200 | [diff] [blame] | 1256 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1257 | done: |
| 1258 | return retval; |
| 1259 | } |
| 1260 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1261 | int current_cpuset_is_being_rebound(void) |
| 1262 | { |
Gu Zheng | 391acf9 | 2014-06-25 09:57:18 +0800 | [diff] [blame] | 1263 | int ret; |
| 1264 | |
| 1265 | rcu_read_lock(); |
| 1266 | ret = task_cs(current) == cpuset_being_rebound; |
| 1267 | rcu_read_unlock(); |
| 1268 | |
| 1269 | return ret; |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1270 | } |
| 1271 | |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1272 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1273 | { |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1274 | #ifdef CONFIG_SMP |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 1275 | if (val < -1 || val >= sched_domain_level_max) |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 1276 | return -EINVAL; |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1277 | #endif |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1278 | |
| 1279 | if (val != cs->relax_domain_level) { |
| 1280 | cs->relax_domain_level = val; |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1281 | if (!cpumask_empty(cs->cpus_allowed) && |
| 1282 | is_sched_load_balance(cs)) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1283 | rebuild_sched_domains_unlocked(); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1284 | } |
| 1285 | |
| 1286 | return 0; |
| 1287 | } |
| 1288 | |
Tejun Heo | 72ec702 | 2013-08-08 20:11:26 -0400 | [diff] [blame] | 1289 | /** |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1290 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
| 1291 | * @cs: the cpuset in which each task's spread flags needs to be changed |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1292 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1293 | * Iterate through each task of @cs updating its spread flags. As this |
| 1294 | * function is called with cpuset_mutex held, cpuset membership stays |
| 1295 | * stable. |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1296 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1297 | static void update_tasks_flags(struct cpuset *cs) |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1298 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1299 | struct css_task_iter it; |
| 1300 | struct task_struct *task; |
| 1301 | |
| 1302 | css_task_iter_start(&cs->css, &it); |
| 1303 | while ((task = css_task_iter_next(&it))) |
| 1304 | cpuset_update_task_spread_flag(cs, task); |
| 1305 | css_task_iter_end(&it); |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1306 | } |
| 1307 | |
| 1308 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1309 | * update_flag - read a 0 or a 1 in a file and update associated flag |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1310 | * bit: the bit to update (see cpuset_flagbits_t) |
| 1311 | * cs: the cpuset to update |
| 1312 | * turning_on: whether the flag is being set or cleared |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1313 | * Call with cpuset_mutex held. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | */ |
| 1315 | |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1316 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
| 1317 | int turning_on) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1319 | struct cpuset *trialcs; |
Rakib Mullick | 40b6a76 | 2008-10-18 20:28:18 -0700 | [diff] [blame] | 1320 | int balance_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1321 | int spread_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1322 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1324 | trialcs = alloc_trial_cpuset(cs); |
| 1325 | if (!trialcs) |
| 1326 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1328 | if (turning_on) |
| 1329 | set_bit(bit, &trialcs->flags); |
| 1330 | else |
| 1331 | clear_bit(bit, &trialcs->flags); |
| 1332 | |
| 1333 | err = validate_change(cs, trialcs); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1334 | if (err < 0) |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1335 | goto out; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1336 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1337 | balance_flag_changed = (is_sched_load_balance(cs) != |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1338 | is_sched_load_balance(trialcs)); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1339 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1340 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
| 1341 | || (is_spread_page(cs) != is_spread_page(trialcs))); |
| 1342 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1343 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1344 | cs->flags = trialcs->flags; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1345 | spin_unlock_irq(&callback_lock); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1346 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1347 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1348 | rebuild_sched_domains_unlocked(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1349 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1350 | if (spread_flag_changed) |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1351 | update_tasks_flags(cs); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1352 | out: |
| 1353 | free_trial_cpuset(trialcs); |
| 1354 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | } |
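| | |
| | /* |
| |  * Illustrative flow (not part of the original source): clearing load |
| |  * balancing on a cpuset with a non-empty cpumask, |
| |  * |
| |  *	update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
| |  * |
| |  * flips balance_flag_changed and so rebuilds the sched domains, while a |
| |  * page/slab spread change instead propagates to the member tasks via |
| |  * update_tasks_flags(). |
| |  */ |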
| 1356 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1357 | /* |
Adrian Bunk | 80f7228 | 2006-06-30 18:27:16 +0200 | [diff] [blame] | 1358 | * Frequency meter - How fast is some event occurring? |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1359 | * |
| 1360 | * These routines manage a digitally filtered, constant time based, |
| 1361 | * event frequency meter. There are four routines: |
| 1362 | * fmeter_init() - initialize a frequency meter. |
| 1363 | * fmeter_markevent() - called each time the event happens. |
| 1364 | * fmeter_getrate() - returns the recent rate of such events. |
| 1365 | * fmeter_update() - internal routine used to update fmeter. |
| 1366 | * |
| 1367 | * A common data structure is passed to each of these routines, |
| 1368 | * which is used to keep track of the state required to manage the |
| 1369 | * frequency meter and its digital filter. |
| 1370 | * |
| 1371 | * The filter works on the number of events marked per unit time. |
| 1372 | * The filter is single-pole low-pass recursive (IIR). The time unit |
| 1373 | * is 1 second. Arithmetic is done using 32-bit integers scaled to |
| 1374 | * simulate 3 decimal digits of precision (multiplied by 1000). |
| 1375 | * |
| 1376 | * With an FM_COEF of 933, and a time base of 1 second, the filter |
| 1377 | * has a half-life of 10 seconds, meaning that if the events quit |
| 1378 | * happening, then the rate returned from the fmeter_getrate() |
| 1379 | * will be cut in half each 10 seconds, until it converges to zero. |
| 1380 | * |
| 1381 | * It is not worth doing a real infinitely recursive filter. If more |
| 1382 | * than FM_MAXTICKS ticks have elapsed since the last filter event, |
| 1383 | * just compute FM_MAXTICKS ticks worth, by which point the level |
| 1384 | * will be stable. |
| 1385 | * |
| 1386 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid |
| 1387 | * arithmetic overflow in the fmeter_update() routine. |
| 1388 | * |
| 1389 | * Given the simple 32 bit integer arithmetic used, this meter works |
| 1390 | * best for reporting rates between one per millisecond (msec) and |
| 1391 | * one per 32 (approx) seconds. At constant rates faster than one |
| 1392 | * per msec it maxes out at values just under 1,000,000. At constant |
| 1393 | * rates between one per msec, and one per second it will stabilize |
| 1394 | * to a value N*1000, where N is the rate of events per second. |
| 1395 | * At constant rates between one per second and one per 32 seconds, |
| 1396 | * it will be choppy, moving up on the seconds that have an event, |
| 1397 | * and then decaying until the next event. At rates slower than |
| 1398 | * about one in 32 seconds, it decays all the way back to zero between |
| 1399 | * each event. |
| 1400 | */ |
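| | |
| | /* |
| |  * Worked check of the half-life claim (illustrative): each idle second |
| |  * scales the level by FM_COEF/FM_SCALE = 0.933, and 0.933^10 ~= 0.50, |
| |  * so an idle meter's reported rate halves roughly every 10 seconds. At |
| |  * a steady rate of N events/sec the level converges to N * FM_SCALE, |
| |  * matching the "N*1000" figure quoted above. |
| |  */ |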
| 1401 | |
| 1402 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ |
Arnd Bergmann | d2b4365 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 1403 | #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1404 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ |
| 1405 | #define FM_SCALE 1000 /* faux fixed point scale */ |
| 1406 | |
| 1407 | /* Initialize a frequency meter */ |
| 1408 | static void fmeter_init(struct fmeter *fmp) |
| 1409 | { |
| 1410 | fmp->cnt = 0; |
| 1411 | fmp->val = 0; |
| 1412 | fmp->time = 0; |
| 1413 | spin_lock_init(&fmp->lock); |
| 1414 | } |
| 1415 | |
| 1416 | /* Internal meter update - process cnt events and update value */ |
| 1417 | static void fmeter_update(struct fmeter *fmp) |
| 1418 | { |
Arnd Bergmann | d2b4365 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 1419 | time64_t now; |
| 1420 | u32 ticks; |
| 1421 | |
| 1422 | now = ktime_get_seconds(); |
| 1423 | ticks = now - fmp->time; |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1424 | |
| 1425 | if (ticks == 0) |
| 1426 | return; |
| 1427 | |
| 1428 | ticks = min(FM_MAXTICKS, ticks); |
| 1429 | while (ticks-- > 0) |
| 1430 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; |
| 1431 | fmp->time = now; |
| 1432 | |
| 1433 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; |
| 1434 | fmp->cnt = 0; |
| 1435 | } |
| 1436 | |
| 1437 | /* Process any previous ticks, then bump cnt by one (times scale). */ |
| 1438 | static void fmeter_markevent(struct fmeter *fmp) |
| 1439 | { |
| 1440 | spin_lock(&fmp->lock); |
| 1441 | fmeter_update(fmp); |
| 1442 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); |
| 1443 | spin_unlock(&fmp->lock); |
| 1444 | } |
| 1445 | |
| 1446 | /* Process any previous ticks, then return current value. */ |
| 1447 | static int fmeter_getrate(struct fmeter *fmp) |
| 1448 | { |
| 1449 | int val; |
| 1450 | |
| 1451 | spin_lock(&fmp->lock); |
| 1452 | fmeter_update(fmp); |
| 1453 | val = fmp->val; |
| 1454 | spin_unlock(&fmp->lock); |
| 1455 | return val; |
| 1456 | } |
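| | |
| | /* |
| |  * Usage sketch (illustrative; assumes the fmeter embedded in struct |
| |  * cpuset for memory-pressure accounting): |
| |  * |
| |  *	fmeter_markevent(&cs->fmeter);		/* on each pressure event */ |
| |  *	rate = fmeter_getrate(&cs->fmeter);	/* filtered rate, scaled |
| |  *						   by FM_SCALE */ |
| |  */ |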
| 1457 | |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1458 | static struct cpuset *cpuset_attach_old_cs; |
| 1459 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1460 | /* Called by the cgroup core to determine if a cpuset is usable; takes cpuset_mutex */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1461 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1462 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1463 | struct cgroup_subsys_state *css; |
| 1464 | struct cpuset *cs; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1465 | struct task_struct *task; |
| 1466 | int ret; |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1467 | |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1468 | /* used later by cpuset_attach() */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1469 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
| 1470 | cs = css_cs(css); |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1471 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1472 | mutex_lock(&cpuset_mutex); |
| 1473 | |
Tejun Heo | aa6ec29 | 2014-07-09 10:08:08 -0400 | [diff] [blame] | 1474 | /* allow moving tasks into an empty cpuset if on default hierarchy */ |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1475 | ret = -ENOSPC; |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 1476 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
Li Zefan | 88fa523 | 2013-06-09 17:16:46 +0800 | [diff] [blame] | 1477 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1478 | goto out_unlock; |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 1479 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1480 | cgroup_taskset_for_each(task, css, tset) { |
Juri Lelli | 7f51412 | 2014-09-19 10:22:40 +0100 | [diff] [blame] | 1481 | ret = task_can_attach(task, cs->cpus_allowed); |
| 1482 | if (ret) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1483 | goto out_unlock; |
| 1484 | ret = security_task_setscheduler(task); |
| 1485 | if (ret) |
| 1486 | goto out_unlock; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1487 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1488 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1489 | /* |
| 1490 | * Mark attach is in progress. This makes validate_change() fail |
| 1491 | * changes which zero cpus/mems_allowed. |
| 1492 | */ |
| 1493 | cs->attach_in_progress++; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1494 | ret = 0; |
| 1495 | out_unlock: |
| 1496 | mutex_unlock(&cpuset_mutex); |
| 1497 | return ret; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1498 | } |
| 1499 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1500 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1501 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1502 | struct cgroup_subsys_state *css; |
| 1503 | struct cpuset *cs; |
| 1504 | |
| 1505 | cgroup_taskset_first(tset, &css); |
| 1506 | cs = css_cs(css); |
| 1507 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1508 | mutex_lock(&cpuset_mutex); |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1509 | css_cs(css)->attach_in_progress--; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1510 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1511 | } |
| 1512 | |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1513 | /* |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1514 | * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1515 | * but we can't allocate it dynamically there. Define it global and |
| 1516 | * allocate from cpuset_init(). |
| 1517 | */ |
| 1518 | static cpumask_var_t cpus_attach; |
| 1519 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1520 | static void cpuset_attach(struct cgroup_taskset *tset) |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1521 | { |
Li Zefan | 67bd2c5 | 2013-06-05 17:15:35 +0800 | [diff] [blame] | 1522 | /* static buf protected by cpuset_mutex */ |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1523 | static nodemask_t cpuset_attach_nodemask_to; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1524 | struct task_struct *task; |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 1525 | struct task_struct *leader; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1526 | struct cgroup_subsys_state *css; |
| 1527 | struct cpuset *cs; |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1528 | struct cpuset *oldcs = cpuset_attach_old_cs; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1529 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1530 | cgroup_taskset_first(tset, &css); |
| 1531 | cs = css_cs(css); |
| 1532 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1533 | mutex_lock(&cpuset_mutex); |
| 1534 | |
Tejun Heo | 94196f5 | 2011-12-12 18:12:22 -0800 | [diff] [blame] | 1535 | /* prepare for attach */ |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1536 | if (cs == &top_cpuset) |
| 1537 | cpumask_copy(cpus_attach, cpu_possible_mask); |
| 1538 | else |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1539 | guarantee_online_cpus(cs, cpus_attach); |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1540 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1541 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
Tejun Heo | 94196f5 | 2011-12-12 18:12:22 -0800 | [diff] [blame] | 1542 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1543 | cgroup_taskset_for_each(task, css, tset) { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1544 | /* |
| 1545 | * can_attach beforehand should guarantee that this doesn't |
| 1546 | * fail. TODO: have a better way to handle failure here |
| 1547 | */ |
| 1548 | WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); |
| 1549 | |
| 1550 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); |
| 1551 | cpuset_update_task_spread_flag(cs, task); |
| 1552 | } |
David Quigley | 22fb52d | 2006-06-23 02:04:00 -0700 | [diff] [blame] | 1553 | |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1554 | /* |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 1555 | * Change mm for all threadgroup leaders. This is expensive and may |
| 1556 | * sleep and should be moved outside migration path proper. |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1557 | */ |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1558 | cpuset_attach_nodemask_to = cs->effective_mems; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1559 | cgroup_taskset_for_each_leader(leader, css, tset) { |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 1560 | struct mm_struct *mm = get_task_mm(leader); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 1561 | |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 1562 | if (mm) { |
| 1563 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); |
| 1564 | |
| 1565 | /* |
| 1566 | * old_mems_allowed is the same as mems_allowed |
| 1567 | * here, except if this task is being moved |
| 1568 | * automatically due to hotplug. In that case |
| 1569 | * @mems_allowed has been updated and is empty, so |
| 1570 | * @old_mems_allowed is the right nodeset to |
| 1571 | * migrate the mm from. |
| 1572 | */ |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1573 | if (is_memory_migrate(cs)) |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 1574 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
| 1575 | &cpuset_attach_nodemask_to); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1576 | else |
| 1577 | mmput(mm); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 1578 | } |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1579 | } |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1580 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1581 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
Tejun Heo | 02bb586 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1582 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1583 | cs->attach_in_progress--; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 1584 | if (!cs->attach_in_progress) |
| 1585 | wake_up(&cpuset_attach_wq); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1586 | |
| 1587 | mutex_unlock(&cpuset_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | } |
| 1589 | |
| 1590 | /* The various types of files and directories in a cpuset file system */ |
| 1591 | |
| 1592 | typedef enum { |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1593 | FILE_MEMORY_MIGRATE, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1594 | FILE_CPULIST, |
| 1595 | FILE_MEMLIST, |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1596 | FILE_EFFECTIVE_CPULIST, |
| 1597 | FILE_EFFECTIVE_MEMLIST, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | FILE_CPU_EXCLUSIVE, |
| 1599 | FILE_MEM_EXCLUSIVE, |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1600 | FILE_MEM_HARDWALL, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1601 | FILE_SCHED_LOAD_BALANCE, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1602 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1603 | FILE_MEMORY_PRESSURE_ENABLED, |
| 1604 | FILE_MEMORY_PRESSURE, |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 1605 | FILE_SPREAD_PAGE, |
| 1606 | FILE_SPREAD_SLAB, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1607 | } cpuset_filetype_t; |
| 1608 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1609 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 1610 | u64 val) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1611 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1612 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1613 | cpuset_filetype_t type = cft->private; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 1614 | int retval = 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1615 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1616 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1617 | mutex_lock(&cpuset_mutex); |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 1618 | if (!is_cpuset_online(cs)) { |
| 1619 | retval = -ENODEV; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1620 | goto out_unlock; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 1621 | } |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1622 | |
| 1623 | switch (type) { |
| 1624 | case FILE_CPU_EXCLUSIVE: |
| 1625 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
| 1626 | break; |
| 1627 | case FILE_MEM_EXCLUSIVE: |
| 1628 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
| 1629 | break; |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1630 | case FILE_MEM_HARDWALL: |
| 1631 | retval = update_flag(CS_MEM_HARDWALL, cs, val); |
| 1632 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1633 | case FILE_SCHED_LOAD_BALANCE: |
| 1634 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
| 1635 | break; |
| 1636 | case FILE_MEMORY_MIGRATE: |
| 1637 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
| 1638 | break; |
| 1639 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 1640 | cpuset_memory_pressure_enabled = !!val; |
| 1641 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1642 | case FILE_SPREAD_PAGE: |
| 1643 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1644 | break; |
| 1645 | case FILE_SPREAD_SLAB: |
| 1646 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1647 | break; |
| 1648 | default: |
| 1649 | retval = -EINVAL; |
| 1650 | break; |
| 1651 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1652 | out_unlock: |
| 1653 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1654 | put_online_cpus(); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1655 | return retval; |
| 1656 | } |
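| | |
| | /* |
| |  * Illustrative mapping (not part of the original source): a userspace |
| |  * write such as "echo 1 > cpuset.memory_migrate" reaches this handler |
| |  * with val == 1 and cft->private == FILE_MEMORY_MIGRATE, and so sets |
| |  * the CS_MEMORY_MIGRATE flag via update_flag(). |
| |  */ |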
| 1657 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1658 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 1659 | s64 val) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1660 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1661 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1662 | cpuset_filetype_t type = cft->private; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1663 | int retval = -ENODEV; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1664 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1665 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1666 | mutex_lock(&cpuset_mutex); |
| 1667 | if (!is_cpuset_online(cs)) |
| 1668 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1669 | |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1670 | switch (type) { |
| 1671 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 1672 | retval = update_relax_domain_level(cs, val); |
| 1673 | break; |
| 1674 | default: |
| 1675 | retval = -EINVAL; |
| 1676 | break; |
| 1677 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1678 | out_unlock: |
| 1679 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1680 | put_online_cpus(); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1681 | return retval; |
| 1682 | } |
| 1683 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1684 | /* |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1685 | * Common handling for a write to a "cpus" or "mems" file. |
| 1686 | */ |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1687 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
| 1688 | char *buf, size_t nbytes, loff_t off) |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1689 | { |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1690 | struct cpuset *cs = css_cs(of_css(of)); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1691 | struct cpuset *trialcs; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1692 | int retval = -ENODEV; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1693 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1694 | buf = strstrip(buf); |
| 1695 | |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1696 | /* |
| 1697 | * CPU or memory hotunplug may leave @cs w/o any execution |
| 1698 | * resources, in which case the hotplug code asynchronously updates |
| 1699 | * configuration and transfers all tasks to the nearest ancestor |
| 1700 | * which can execute. |
| 1701 | * |
| 1702 | * As writes to "cpus" or "mems" may restore @cs's execution |
| 1703 | * resources, wait for the previously scheduled operations before |
| 1704 | * proceeding, so that we don't end up repeatedly removing tasks added
| 1705 | * after execution capability is restored. |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 1706 | * |
| 1707 | * cpuset_hotplug_work calls back into cgroup core via |
| 1708 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs |
| 1709 | * operation like this one can lead to a deadlock through kernfs |
| 1710 | * active_ref protection. Let's break the protection. Losing the |
| 1711 | * protection is okay as we check whether @cs is online after |
| 1712 | * grabbing cpuset_mutex anyway. This only happens on the legacy |
| 1713 | * hierarchies. |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1714 | */ |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 1715 | css_get(&cs->css); |
| 1716 | kernfs_break_active_protection(of->kn); |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1717 | flush_work(&cpuset_hotplug_work); |
| 1718 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1719 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1720 | mutex_lock(&cpuset_mutex); |
| 1721 | if (!is_cpuset_online(cs)) |
| 1722 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1723 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1724 | trialcs = alloc_trial_cpuset(cs); |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 1725 | if (!trialcs) { |
| 1726 | retval = -ENOMEM; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1727 | goto out_unlock; |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 1728 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1729 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1730 | switch (of_cft(of)->private) { |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1731 | case FILE_CPULIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1732 | retval = update_cpumask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1733 | break; |
| 1734 | case FILE_MEMLIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1735 | retval = update_nodemask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1736 | break; |
| 1737 | default: |
| 1738 | retval = -EINVAL; |
| 1739 | break; |
| 1740 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1741 | |
| 1742 | free_trial_cpuset(trialcs); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1743 | out_unlock: |
| 1744 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1745 | put_online_cpus(); |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 1746 | kernfs_unbreak_active_protection(of->kn); |
| 1747 | css_put(&cs->css); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1748 | flush_workqueue(cpuset_migrate_mm_wq); |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1749 | return retval ?: nbytes; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1750 | } |
| 1751 | |
| 1752 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 | * These ascii lists should be read in a single call, by using a user |
| 1754 | * buffer large enough to hold the entire map. If read in smaller |
| 1755 | * chunks, there is no guarantee of atomicity. Since the display format |
| 1756 | * used, list of ranges of sequential numbers, is variable length, |
| 1757 | * and since these maps can change value dynamically, one could read |
| 1758 | * gibberish by doing partial reads while a list was changing. |
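| | * For example, a mask covering CPUs 0-3 and 5 reads back as "0-3,5".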
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | */ |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1760 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1761 | { |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1762 | struct cpuset *cs = css_cs(seq_css(sf)); |
| 1763 | cpuset_filetype_t type = seq_cft(sf)->private; |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 1764 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1765 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1766 | spin_lock_irq(&callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1767 | |
| 1768 | switch (type) { |
| 1769 | case FILE_CPULIST: |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1770 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1771 | break; |
| 1772 | case FILE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 1773 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1774 | break; |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1775 | case FILE_EFFECTIVE_CPULIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 1776 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1777 | break; |
| 1778 | case FILE_EFFECTIVE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 1779 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1780 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 | default: |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 1782 | ret = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1783 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1784 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1785 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 1786 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 | } |
| 1788 | |
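| | /*
| | * Read handler for the boolean (u64) cgroup files: returns the current
| | * value of the flag or meter selected by cft->private.
| | */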
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1789 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1790 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1791 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1792 | cpuset_filetype_t type = cft->private; |
| 1793 | switch (type) { |
| 1794 | case FILE_CPU_EXCLUSIVE: |
| 1795 | return is_cpu_exclusive(cs); |
| 1796 | case FILE_MEM_EXCLUSIVE: |
| 1797 | return is_mem_exclusive(cs); |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1798 | case FILE_MEM_HARDWALL: |
| 1799 | return is_mem_hardwall(cs); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1800 | case FILE_SCHED_LOAD_BALANCE: |
| 1801 | return is_sched_load_balance(cs); |
| 1802 | case FILE_MEMORY_MIGRATE: |
| 1803 | return is_memory_migrate(cs); |
| 1804 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 1805 | return cpuset_memory_pressure_enabled; |
| 1806 | case FILE_MEMORY_PRESSURE: |
| 1807 | return fmeter_getrate(&cs->fmeter); |
| 1808 | case FILE_SPREAD_PAGE: |
| 1809 | return is_spread_page(cs); |
| 1810 | case FILE_SPREAD_SLAB: |
| 1811 | return is_spread_slab(cs); |
| 1812 | default: |
| 1813 | BUG(); |
| 1814 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1815 | |
| 1816 | /* Unreachable but makes gcc happy */ |
| 1817 | return 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1818 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1819 | |
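| | /* Read handler for the s64 cgroup files (sched_relax_domain_level). */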
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1820 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1821 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1822 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1823 | cpuset_filetype_t type = cft->private; |
| 1824 | switch (type) { |
| 1825 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 1826 | return cs->relax_domain_level; |
| 1827 | default: |
| 1828 | BUG(); |
| 1829 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1830 | |
| 1831 | /* Unreachable but makes gcc happy */
| 1832 | return 0; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1833 | } |
| 1834 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 | |
| 1836 | /* |
| 1837 | * for the common functions, 'private' gives the type of file |
| 1838 | */ |
| 1839 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1840 | static struct cftype files[] = { |
| 1841 | { |
| 1842 | .name = "cpus", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1843 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1844 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1845 | .max_write_len = (100U + 6 * NR_CPUS), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1846 | .private = FILE_CPULIST, |
| 1847 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1849 | { |
| 1850 | .name = "mems", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1851 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1852 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1853 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1854 | .private = FILE_MEMLIST, |
| 1855 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1856 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1857 | { |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1858 | .name = "effective_cpus", |
| 1859 | .seq_show = cpuset_common_seq_show, |
| 1860 | .private = FILE_EFFECTIVE_CPULIST, |
| 1861 | }, |
| 1862 | |
| 1863 | { |
| 1864 | .name = "effective_mems", |
| 1865 | .seq_show = cpuset_common_seq_show, |
| 1866 | .private = FILE_EFFECTIVE_MEMLIST, |
| 1867 | }, |
| 1868 | |
| 1869 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1870 | .name = "cpu_exclusive", |
| 1871 | .read_u64 = cpuset_read_u64, |
| 1872 | .write_u64 = cpuset_write_u64, |
| 1873 | .private = FILE_CPU_EXCLUSIVE, |
| 1874 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1875 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1876 | { |
| 1877 | .name = "mem_exclusive", |
| 1878 | .read_u64 = cpuset_read_u64, |
| 1879 | .write_u64 = cpuset_write_u64, |
| 1880 | .private = FILE_MEM_EXCLUSIVE, |
| 1881 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1883 | { |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1884 | .name = "mem_hardwall", |
| 1885 | .read_u64 = cpuset_read_u64, |
| 1886 | .write_u64 = cpuset_write_u64, |
| 1887 | .private = FILE_MEM_HARDWALL, |
| 1888 | }, |
| 1889 | |
| 1890 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1891 | .name = "sched_load_balance", |
| 1892 | .read_u64 = cpuset_read_u64, |
| 1893 | .write_u64 = cpuset_write_u64, |
| 1894 | .private = FILE_SCHED_LOAD_BALANCE, |
| 1895 | }, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1896 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1897 | { |
| 1898 | .name = "sched_relax_domain_level", |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1899 | .read_s64 = cpuset_read_s64, |
| 1900 | .write_s64 = cpuset_write_s64, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1901 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
| 1902 | }, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1903 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1904 | { |
| 1905 | .name = "memory_migrate", |
| 1906 | .read_u64 = cpuset_read_u64, |
| 1907 | .write_u64 = cpuset_write_u64, |
| 1908 | .private = FILE_MEMORY_MIGRATE, |
| 1909 | }, |
| 1910 | |
| 1911 | { |
| 1912 | .name = "memory_pressure", |
| 1913 | .read_u64 = cpuset_read_u64, |
Waiman Long | 309e4db | 2017-08-24 12:04:29 -0400 | [diff] [blame] | 1914 | .private = FILE_MEMORY_PRESSURE, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1915 | }, |
| 1916 | |
| 1917 | { |
| 1918 | .name = "memory_spread_page", |
| 1919 | .read_u64 = cpuset_read_u64, |
| 1920 | .write_u64 = cpuset_write_u64, |
| 1921 | .private = FILE_SPREAD_PAGE, |
| 1922 | }, |
| 1923 | |
| 1924 | { |
| 1925 | .name = "memory_spread_slab", |
| 1926 | .read_u64 = cpuset_read_u64, |
| 1927 | .write_u64 = cpuset_write_u64, |
| 1928 | .private = FILE_SPREAD_SLAB, |
| 1929 | }, |
Tejun Heo | 4baf6e3 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1930 | |
| 1931 | { |
| 1932 | .name = "memory_pressure_enabled", |
| 1933 | .flags = CFTYPE_ONLY_ON_ROOT, |
| 1934 | .read_u64 = cpuset_read_u64, |
| 1935 | .write_u64 = cpuset_write_u64, |
| 1936 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
| 1937 | }, |
| 1938 | |
| 1939 | { } /* terminate */ |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1940 | }; |
| 1941 | |
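| | /*
| | * Illustrative usage from userspace (legacy hierarchy; the mount point
| | * and file names are an example, not mandated by this file):
| | *
| | * # mkdir /sys/fs/cgroup/cpuset/set0
| | * # echo 0-3 > /sys/fs/cgroup/cpuset/set0/cpuset.cpus
| | * # echo 0 > /sys/fs/cgroup/cpuset/set0/cpuset.mems
| | *
| | * Writes to "cpus"/"mems" are parsed by cpuset_write_resmask(); the
| | * boolean files go through cpuset_write_u64().
| | */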
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1942 | /* |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 1943 | * cpuset_css_alloc - allocate a cpuset css |
Li Zefan | c9e5fe6 | 2013-06-14 11:18:27 +0800 | [diff] [blame] | 1944 | * @parent_css: css of the parent the new cpuset will be part of
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1945 | */ |
| 1946 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1947 | static struct cgroup_subsys_state * |
| 1948 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | { |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1950 | struct cpuset *cs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1951 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1952 | if (!parent_css) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1953 | return &top_cpuset.css; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 1954 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1955 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1956 | if (!cs) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1957 | return ERR_PTR(-ENOMEM); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1958 | if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) |
| 1959 | goto free_cs; |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1960 | if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL)) |
| 1961 | goto free_allowed; |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1962 | if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL)) |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1963 | goto free_requested; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1964 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1965 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1966 | cpumask_clear(cs->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1967 | cpumask_clear(cs->cpus_requested); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 1968 | nodes_clear(cs->mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1969 | cpumask_clear(cs->effective_cpus); |
| 1970 | nodes_clear(cs->effective_mems); |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1971 | fmeter_init(&cs->fmeter); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1972 | cs->relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1973 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1974 | return &cs->css; |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1975 | |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1976 | free_requested: |
| 1977 | free_cpumask_var(cs->cpus_requested); |
| 1978 | free_allowed: |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1979 | free_cpumask_var(cs->cpus_allowed); |
| 1980 | free_cs: |
| 1981 | kfree(cs); |
| 1982 | return ERR_PTR(-ENOMEM); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1983 | } |
| 1984 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1985 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1986 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1987 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1988 | struct cpuset *parent = parent_cs(cs); |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1989 | struct cpuset *tmp_cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 1990 | struct cgroup_subsys_state *pos_css; |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1991 | |
| 1992 | if (!parent) |
| 1993 | return 0; |
| 1994 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1995 | mutex_lock(&cpuset_mutex); |
| 1996 | |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1997 | set_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1998 | if (is_spread_page(parent)) |
| 1999 | set_bit(CS_SPREAD_PAGE, &cs->flags); |
| 2000 | if (is_spread_slab(parent)) |
| 2001 | set_bit(CS_SPREAD_SLAB, &cs->flags); |
| 2002 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2003 | cpuset_inc(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2004 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2005 | spin_lock_irq(&callback_lock); |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2006 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2007 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
| 2008 | cs->effective_mems = parent->effective_mems; |
| 2009 | } |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2010 | spin_unlock_irq(&callback_lock); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2011 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2012 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2013 | goto out_unlock; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2014 | |
| 2015 | /* |
| 2016 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is |
| 2017 | * set. This flag handling is implemented in cgroup core for |
| 2018 | * historical reasons - the flag may be specified during mount.
| 2019 | * |
| 2020 | * Currently, if any sibling cpusets have exclusive cpus or mem, we |
| 2021 | * refuse to clone the configuration - thereby refusing the task to |
| 2022 | * be entered, and as a result refusing the sys_unshare() or |
| 2023 | * clone() which initiated it. If this becomes a problem for some |
| 2024 | * users who wish to allow that scenario, then this could be |
| 2025 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive |
| 2026 | * (and likewise for mems) to the new cgroup. |
| 2027 | */ |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2028 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2029 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2030 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
| 2031 | rcu_read_unlock(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2032 | goto out_unlock; |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2033 | } |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2034 | } |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2035 | rcu_read_unlock(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2036 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2037 | spin_lock_irq(&callback_lock); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2038 | cs->mems_allowed = parent->mems_allowed; |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2039 | cs->effective_mems = parent->mems_allowed; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2040 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2041 | cpumask_copy(cs->cpus_requested, parent->cpus_requested); |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2042 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
Dan Carpenter | cea7446 | 2014-10-27 16:27:02 +0300 | [diff] [blame] | 2043 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2044 | out_unlock: |
| 2045 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2046 | return 0; |
| 2047 | } |
| 2048 | |
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2049 | /* |
| 2050 | * If the cpuset being removed has its flag 'sched_load_balance' |
| 2051 | * enabled, then simulate turning sched_load_balance off, which |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 2052 | * will call rebuild_sched_domains_unlocked(). |
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2053 | */ |
| 2054 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2055 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2056 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2057 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2058 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 2059 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2060 | mutex_lock(&cpuset_mutex); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2061 | |
| 2062 | if (is_sched_load_balance(cs)) |
| 2063 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
| 2064 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2065 | cpuset_dec(); |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2066 | clear_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2067 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2068 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 2069 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2070 | } |
| 2071 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2072 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2073 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2074 | struct cpuset *cs = css_cs(css); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2075 | |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2076 | free_cpumask_var(cs->effective_cpus); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2077 | free_cpumask_var(cs->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2078 | free_cpumask_var(cs->cpus_requested); |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2079 | kfree(cs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2080 | } |
| 2081 | |
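| | /*
| | * cpuset_bind - reset top_cpuset's masks when the root css is rebound
| | *
| | * On the default hierarchy the root cpuset tracks all possible CPUs and
| | * memory nodes; on legacy hierarchies it is narrowed to the effective
| | * masks.
| | */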
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2082 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
| 2083 | { |
| 2084 | mutex_lock(&cpuset_mutex); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2085 | spin_lock_irq(&callback_lock); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2086 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2087 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2088 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
| 2089 | top_cpuset.mems_allowed = node_possible_map; |
| 2090 | } else { |
| 2091 | cpumask_copy(top_cpuset.cpus_allowed, |
| 2092 | top_cpuset.effective_cpus); |
| 2093 | top_cpuset.mems_allowed = top_cpuset.effective_mems; |
| 2094 | } |
| 2095 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2096 | spin_unlock_irq(&callback_lock); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2097 | mutex_unlock(&cpuset_mutex); |
| 2098 | } |
| 2099 | |
Zefan Li | 06f4e948 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2100 | /* |
| 2101 | * Make sure the new task conforms to the current state of its parent,
| 2102 | * which could have been changed by cpuset just after it inherits the |
| 2103 | * state from the parent and before it sits on the cgroup's task list. |
| 2104 | */ |
Wei Yongjun | 8a15b81 | 2016-09-16 13:02:37 +0000 | [diff] [blame] | 2105 | static void cpuset_fork(struct task_struct *task) |
Zefan Li | 06f4e948 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2106 | { |
| 2107 | if (task_css_is_root(task, cpuset_cgrp_id)) |
| 2108 | return; |
| 2109 | |
| 2110 | set_cpus_allowed_ptr(task, ¤t->cpus_allowed); |
| 2111 | task->mems_allowed = current->mems_allowed; |
| 2112 | } |
| 2113 | |
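| | /*
| | * Permission check for moving a task into a cpuset: allowed if the
| | * writer is the task itself, holds CAP_SYS_ADMIN, or has an euid
| | * matching the target task's uid or suid.
| | */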
Riley Andrews | 84c517b | 2015-06-05 18:59:29 -0700 | [diff] [blame] | 2114 | static int cpuset_allow_attach(struct cgroup_taskset *tset) |
| 2115 | { |
| 2116 | const struct cred *cred = current_cred(), *tcred; |
| 2117 | struct task_struct *task; |
| 2118 | struct cgroup_subsys_state *css; |
| 2119 | |
| 2120 | cgroup_taskset_for_each(task, css, tset) { |
| 2121 | tcred = __task_cred(task); |
| 2122 | |
| 2123 | if ((current != task) && !capable(CAP_SYS_ADMIN) && |
| 2124 | cred->euid.val != tcred->uid.val && cred->euid.val != tcred->suid.val) |
| 2125 | return -EACCES; |
| 2126 | } |
| 2127 | |
| 2128 | return 0; |
| 2129 | } |
| 2130 | |
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 2131 | struct cgroup_subsys cpuset_cgrp_subsys = { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2132 | .css_alloc = cpuset_css_alloc, |
| 2133 | .css_online = cpuset_css_online, |
| 2134 | .css_offline = cpuset_css_offline, |
| 2135 | .css_free = cpuset_css_free, |
| 2136 | .can_attach = cpuset_can_attach, |
Riley Andrews | 84c517b | 2015-06-05 18:59:29 -0700 | [diff] [blame] | 2137 | .allow_attach = cpuset_allow_attach, |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2138 | .cancel_attach = cpuset_cancel_attach, |
| 2139 | .attach = cpuset_attach, |
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 2140 | .post_attach = cpuset_post_attach, |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2141 | .bind = cpuset_bind, |
Zefan Li | 06f4e948 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2142 | .fork = cpuset_fork, |
Tejun Heo | 5577964 | 2014-07-15 11:05:09 -0400 | [diff] [blame] | 2143 | .legacy_cftypes = files, |
Tejun Heo | b38e42e | 2016-02-23 10:00:50 -0500 | [diff] [blame] | 2144 | .early_init = true, |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2145 | }; |
| 2146 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2147 | /** |
| 2148 | * cpuset_init - initialize cpusets at system boot |
| 2149 | * |
| 2150 | * Description: Initialize top_cpuset and the cpuset internal file system.
| 2151 | **/ |
| 2152 | |
| 2153 | int __init cpuset_init(void) |
| 2154 | { |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2155 | int err = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2156 | |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 2157 | if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)) |
| 2158 | BUG(); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2159 | if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)) |
| 2160 | BUG(); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2161 | if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL)) |
| 2162 | BUG(); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 2163 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2164 | cpumask_setall(top_cpuset.cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2165 | cpumask_setall(top_cpuset.cpus_requested); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 2166 | nodes_setall(top_cpuset.mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2167 | cpumask_setall(top_cpuset.effective_cpus); |
| 2168 | nodes_setall(top_cpuset.effective_mems); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2169 | |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2170 | fmeter_init(&top_cpuset.fmeter); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2171 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2172 | top_cpuset.relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2173 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2174 | err = register_filesystem(&cpuset_fs_type); |
| 2175 | if (err < 0) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2176 | return err; |
| 2177 | |
Li Zefan | 2341d1b | 2009-01-07 18:08:42 -0800 | [diff] [blame] | 2178 | if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)) |
| 2179 | BUG(); |
| 2180 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2181 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2182 | } |
| 2183 | |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2184 | /* |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2185 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2186 | * or memory nodes, we need to walk over the cpuset hierarchy, |
| 2187 | * removing that CPU or node from all cpusets. If this removes the |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2188 | * last CPU or node from a cpuset, then move the tasks in the empty |
| 2189 | * cpuset to its next-highest non-empty parent. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2190 | */ |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2191 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2192 | { |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2193 | struct cpuset *parent; |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2194 | |
Paul Jackson | c8d9c90 | 2008-02-07 00:14:46 -0800 | [diff] [blame] | 2195 | /* |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2196 | * Find its next-highest non-empty parent (the top cpuset
| 2197 | * has online cpus, so it can't be empty).
| 2198 | */ |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2199 | parent = parent_cs(cs); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2200 | while (cpumask_empty(parent->cpus_allowed) || |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2201 | nodes_empty(parent->mems_allowed)) |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2202 | parent = parent_cs(parent); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2203 | |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 2204 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
Fabian Frederick | 12d3089 | 2014-05-05 19:49:00 +0200 | [diff] [blame] | 2205 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 2206 | pr_cont_cgroup_name(cs->css.cgroup); |
| 2207 | pr_cont("\n"); |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 2208 | } |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2209 | } |
| 2210 | |
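| | /*
| | * Hotplug handling for legacy hierarchies: offlined CPUs/nodes are
| | * dropped from both the configured and the effective masks, and tasks
| | * are moved to an ancestor if the cpuset is left empty.
| | */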
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2211 | static void |
| 2212 | hotplug_update_tasks_legacy(struct cpuset *cs, |
| 2213 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 2214 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2215 | { |
| 2216 | bool is_empty; |
| 2217 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2218 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2219 | cpumask_copy(cs->cpus_allowed, new_cpus); |
| 2220 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 2221 | cs->mems_allowed = *new_mems; |
| 2222 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2223 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2224 | |
| 2225 | /* |
| 2226 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, |
| 2227 | * as the tasks will be migrated to an ancestor.
| 2228 | */ |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2229 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2230 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2231 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2232 | update_tasks_nodemask(cs); |
| 2233 | |
| 2234 | is_empty = cpumask_empty(cs->cpus_allowed) || |
| 2235 | nodes_empty(cs->mems_allowed); |
| 2236 | |
| 2237 | mutex_unlock(&cpuset_mutex); |
| 2238 | |
| 2239 | /* |
| 2240 | * Move tasks to the nearest ancestor with execution resources.
| 2241 | * This is a full cgroup operation which will also call back into
| 2242 | * cpuset. Should be done outside any lock. |
| 2243 | */ |
| 2244 | if (is_empty) |
| 2245 | remove_tasks_in_empty_cpuset(cs); |
| 2246 | |
| 2247 | mutex_lock(&cpuset_mutex); |
| 2248 | } |
| 2249 | |
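| | /*
| | * Hotplug handling for the default hierarchy: only the effective masks
| | * change; if they would become empty, fall back to the parent's
| | * effective masks instead of moving the tasks out.
| | */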
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2250 | static void |
| 2251 | hotplug_update_tasks(struct cpuset *cs, |
| 2252 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 2253 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2254 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2255 | if (cpumask_empty(new_cpus)) |
| 2256 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); |
| 2257 | if (nodes_empty(*new_mems)) |
| 2258 | *new_mems = parent_cs(cs)->effective_mems; |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2259 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2260 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2261 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 2262 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2263 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2264 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2265 | if (cpus_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2266 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2267 | if (mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2268 | update_tasks_nodemask(cs); |
| 2269 | } |
| 2270 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2271 | /** |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2272 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2273 | * @cs: cpuset in interest |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2274 | * |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2275 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
| 2276 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, |
| 2277 | * all its tasks are moved to the nearest ancestor with both resources. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2278 | */ |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2279 | static void cpuset_hotplug_update_tasks(struct cpuset *cs) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2280 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2281 | static cpumask_t new_cpus; |
| 2282 | static nodemask_t new_mems; |
| 2283 | bool cpus_updated; |
| 2284 | bool mems_updated; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 2285 | retry: |
| 2286 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2287 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2288 | mutex_lock(&cpuset_mutex); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2289 | |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 2290 | /* |
| 2291 | * We have raced with task attaching. We wait until attaching |
| 2292 | * is finished, so we won't attach a task to an empty cpuset. |
| 2293 | */ |
| 2294 | if (cs->attach_in_progress) { |
| 2295 | mutex_unlock(&cpuset_mutex); |
| 2296 | goto retry; |
| 2297 | } |
| 2298 | |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2299 | cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2300 | nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems); |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2301 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2302 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
| 2303 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2304 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2305 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2306 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
| 2307 | cpus_updated, mems_updated); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2308 | else |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2309 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
| 2310 | cpus_updated, mems_updated); |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2311 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2312 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2313 | } |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2314 | |
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2315 | static bool force_rebuild; |
| 2316 | |
| 2317 | void cpuset_force_rebuild(void) |
| 2318 | { |
| 2319 | force_rebuild = true; |
| 2320 | } |
| 2321 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2322 | /** |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2323 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2324 | * |
| 2325 | * This function is called after either CPU or memory configuration has |
| 2326 | * changed and updates cpuset accordingly. The top_cpuset is always |
| 2327 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in |
| 2328 | * order to make cpusets transparent (of no effect) on systems that are
| 2329 | * actively using CPU hotplug but making no active use of cpusets. |
| 2330 | * |
| 2331 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2332 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
| 2333 | * all descendants. |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2334 | * |
| 2335 | * Note that CPU offlining during suspend is ignored. We don't modify |
| 2336 | * cpusets across suspend/resume cycles at all. |
| 2337 | */ |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2338 | static void cpuset_hotplug_workfn(struct work_struct *work) |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2339 | { |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 2340 | static cpumask_t new_cpus; |
| 2341 | static nodemask_t new_mems; |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2342 | bool cpus_updated, mems_updated; |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2343 | bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2344 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2345 | mutex_lock(&cpuset_mutex); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2346 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2347 | /* fetch the available cpus/mems and find out which changed how */ |
| 2348 | cpumask_copy(&new_cpus, cpu_active_mask); |
| 2349 | new_mems = node_states[N_MEMORY]; |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2350 | |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 2351 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
| 2352 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 2353 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2354 | /* synchronize cpus_allowed to cpu_active_mask */ |
| 2355 | if (cpus_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2356 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 2357 | if (!on_dfl) |
| 2358 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 2359 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2360 | spin_unlock_irq(&callback_lock); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2361 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
| 2362 | } |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 2363 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2364 | /* synchronize mems_allowed to N_MEMORY */ |
| 2365 | if (mems_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2366 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 2367 | if (!on_dfl) |
| 2368 | top_cpuset.mems_allowed = new_mems; |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 2369 | top_cpuset.effective_mems = new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2370 | spin_unlock_irq(&callback_lock); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 2371 | update_tasks_nodemask(&top_cpuset); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2372 | } |
| 2373 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2374 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2375 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 2376 | /* if cpus or mems changed, we need to propagate to descendants */ |
| 2377 | if (cpus_updated || mems_updated) { |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 2378 | struct cpuset *cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2379 | struct cgroup_subsys_state *pos_css; |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2380 | |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2381 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2382 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 2383 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2384 | continue; |
| 2385 | rcu_read_unlock(); |
| 2386 | |
| 2387 | cpuset_hotplug_update_tasks(cs); |
| 2388 | |
| 2389 | rcu_read_lock(); |
| 2390 | css_put(&cs->css); |
| 2391 | } |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2392 | rcu_read_unlock(); |
| 2393 | } |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2394 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2395 | /* rebuild sched domains if cpus_allowed has changed */ |
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2396 | if (cpus_updated || force_rebuild) { |
| 2397 | force_rebuild = false; |
Li Zhong | e0e80a0 | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 2398 | rebuild_sched_domains(); |
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2399 | } |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2400 | } |
| 2401 | |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 2402 | void cpuset_update_active_cpus(bool cpu_online) |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2403 | { |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2404 | /* |
| 2405 | * We're inside cpu hotplug critical region which usually nests |
| 2406 | * inside cgroup synchronization. Bounce actual hotplug processing |
| 2407 | * to a work item to avoid reverse locking order. |
| 2408 | * |
| 2409 | * We still need to do partition_sched_domains() synchronously; |
| 2410 | * otherwise, the scheduler will get confused and put tasks on the
| 2411 | * dead CPU. Fall back to the default single domain. |
| 2412 | * cpuset_hotplug_workfn() will rebuild it as necessary. |
| 2413 | */ |
| 2414 | partition_sched_domains(1, NULL, NULL); |
| 2415 | schedule_work(&cpuset_hotplug_work); |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2416 | } |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2417 | |
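| | /* Wait for any pending hotplug processing scheduled above to finish. */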
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2418 | void cpuset_wait_for_hotplug(void) |
| 2419 | { |
| 2420 | flush_work(&cpuset_hotplug_work); |
| 2421 | } |
| 2422 | |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2423 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 2424 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
| 2425 | * Call this routine anytime after node_states[N_MEMORY] changes. |
Srivatsa S. Bhat | a1cd2b1 | 2012-05-24 19:47:03 +0530 | [diff] [blame] | 2426 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2427 | */ |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 2428 | static int cpuset_track_online_nodes(struct notifier_block *self, |
| 2429 | unsigned long action, void *arg) |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2430 | { |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2431 | schedule_work(&cpuset_hotplug_work); |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 2432 | return NOTIFY_OK; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2433 | } |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 2434 | |
| 2435 | static struct notifier_block cpuset_track_online_nodes_nb = { |
| 2436 | .notifier_call = cpuset_track_online_nodes, |
| 2437 | .priority = 10, /* ??! */ |
| 2438 | }; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2439 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2440 | /** |
| 2441 | * cpuset_init_smp - initialize cpus_allowed |
| 2442 | * |
| 2443 | * Description: Finish top cpuset after cpu, node maps are initialized |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 2444 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2445 | void __init cpuset_init_smp(void) |
| 2446 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2447 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 2448 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 2449 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2450 | |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2451 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
| 2452 | top_cpuset.effective_mems = node_states[N_MEMORY]; |
| 2453 | |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 2454 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2455 | |
| 2456 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); |
| 2457 | BUG_ON(!cpuset_migrate_mm_wq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2458 | } |
| 2459 | |
| 2460 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2461 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
| 2462 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 2463 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2464 | * |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2465 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2466 | * attached to the specified @tsk. Guaranteed to return some non-empty |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 2467 | * subset of cpu_online_mask, even if this means going outside the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2468 | * task's cpuset.
| 2469 | **/ |
| 2470 | |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 2471 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2472 | { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2473 | unsigned long flags; |
| 2474 | |
| 2475 | spin_lock_irqsave(&callback_lock, flags); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2476 | rcu_read_lock(); |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2477 | guarantee_online_cpus(task_cs(tsk), pmask); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2478 | rcu_read_unlock(); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2479 | spin_unlock_irqrestore(&callback_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2480 | } |
| 2481 | |
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	rcu_read_lock();
	do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs and cs->cpus_allowed locklessly and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed.  However, both cases imply a
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr(),
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in task_cs()->cpus_allowed.  Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs()'s point of
	 * view; the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
}

void __init cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_mems(task_cs(tsk), &mask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);

	return mask;
}

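/*
 * Usage sketch (illustrative only): logging a task's allowed memory
 * nodes.  The helper name is hypothetical.
 */
static __maybe_unused void cpuset_example_show_mems(struct task_struct *tsk)
{
	nodemask_t mask = cpuset_mems_allowed(tsk);

	pr_info("%s: mems allowed: %*pbl\n", tsk->comm,
		nodemask_pr_args(&mask));
}
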
/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}

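/*
 * Example (hypothetical values): with current->mems_allowed = 0-1, a
 * nodemask of 1-2 is valid (node 1 intersects), while a nodemask of
 * 2-3 is not.
 */
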
/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset.  Must be called while
 * holding callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}

/**
 * cpuset_node_allowed - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If @node is set in
 * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
 * yes.  If current has access to memory reserves due to TIF_MEMDIE, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed and is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_lock.  The
 * __alloc_pages() routine only calls here with the __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_lock.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	TIF_MEMDIE   - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 */
bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	struct cpuset *cs;		/* current cpuset ancestors */
	int allowed;			/* is allocation on @node allowed? */
	unsigned long flags;

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return true;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return false;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return true;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	spin_lock_irqsave(&callback_lock, flags);

	rcu_read_lock();
	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);
	rcu_read_unlock();

	spin_unlock_irqrestore(&callback_lock, flags);
	return allowed;
}

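/*
 * Sketch of the allocator-side fast path (assumptions: the helper name
 * is hypothetical; cpusets_enabled() and zone_to_nid() as provided by
 * the headers).  When no cpusets are in use, the check is skipped
 * entirely:
 */
static __maybe_unused bool example_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (!cpusets_enabled())
		return true;
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
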
/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 * cpuset_slab_spread_node() - On which node to begin search for a slab page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting the
 * search for a free page on the local node, spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

static int cpuset_spread_node(int *rotor)
{
	return *rotor = next_node_in(*rotor, current->mems_allowed);
}

int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

int cpuset_slab_spread_node(void)
{
	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
		current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}

EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

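/*
 * Usage sketch (illustrative): a file system might use
 * cpuset_mem_spread_node() to pick the starting node for a page cache
 * page when the task has PF_SPREAD_PAGE set.  The helper name is
 * hypothetical; alloc_pages_node() falls back along the zonelist if
 * the chosen node is short of memory.
 */
static __maybe_unused struct page *cpuset_example_spread_page(gfp_t gfp_mask)
{
	return alloc_pages_node(cpuset_mem_spread_node(), gfp_mask, 0);
}
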
/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 * one task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

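/*
 * Usage sketch (illustrative, in the spirit of the OOM killer use
 * described above; the helper name is hypothetical): skip a victim
 * whose memory cannot help the current allocator.
 */
static __maybe_unused bool example_victim_may_help(struct task_struct *victim)
{
	return cpuset_mems_allowed_intersects(current, victim);
}
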
/**
 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
 *
 * Description: Prints current's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.
 */
void cpuset_print_current_mems_allowed(void)
{
	struct cgroup *cgrp;

	rcu_read_lock();

	cgrp = task_cs(current)->css.cgroup;
	pr_info("%s cpuset=", current->comm);
	pr_cont_cgroup_name(cgrp);
	pr_cont(" mems_allowed=%*pbl\n",
		nodemask_pr_args(&current->mems_allowed));

	rcu_read_unlock();
}

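/*
 * Example log line (illustrative; task and cpuset names are
 * hypothetical): a task "bash" in a cpuset named "bar" allowed memory
 * nodes 0-1 would log:
 *
 *	bash cpuset=bar mems_allowed=0-1
 */
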
/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
	rcu_read_lock();
	fmeter_markevent(&task_cs(current)->fmeter);
	rcu_read_unlock();
}

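/*
 * The header-side wrapper (sketch, along the lines of
 * include/linux/cpuset.h) keeps the common, disabled case cheap by
 * testing the flag before making the call:
 *
 *	#define cpuset_memory_pressure_bump()			\
 *		do {						\
 *			if (cpuset_memory_pressure_enabled)	\
 *				__cpuset_memory_pressure_bump();\
 *		} while (0)
 */
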
#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	css = task_get_css(tsk, cpuset_cgrp_id);
	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
				current->nsproxy->cgroup_ns);
	css_put(css);
	if (retval >= PATH_MAX)
		retval = -ENAMETOOLONG;
	if (retval < 0)
		goto out_free;
	seq_puts(m, buf);
	seq_putc(m, '\n');
	retval = 0;
out_free:
	kfree(buf);
out:
	return retval;
}
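
/*
 * Example (illustrative; the mount point is hypothetical): a task in
 * the top cpuset reads back "/", while a task attached to a child
 * cpuset created at <mountpoint>/foo reads back "/foo":
 *
 *	$ cat /proc/self/cpuset
 *	/
 */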
#endif /* CONFIG_PROC_PID_CPUSET */

/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));
}
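
/*
 * Example output (illustrative): on a kernel built with
 * MAX_NUMNODES = 32, for a task allowed memory nodes 0-1,
 * /proc/<pid>/status would contain:
 *
 *	Mems_allowed:		00000003
 *	Mems_allowed_list:	0-1
 */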