#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x)	((void)(x))
#endif
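/*
 * Illustrative usage (a sketch, not part of the original source): the macro
 * stringifies its argument, so a failing check prints the expression itself.
 * With CONFIG_SCHED_DEBUG=y,
 *
 *	SCHED_WARN_ON(rq->nr_running > 10000);
 *
 * warns once with the message "rq->nr_running > 10000"; without it, the
 * expression is still evaluated for side effects but nothing is printed.
 */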

struct rq;
struct cpuidle_state;

#ifdef CONFIG_SCHED_HMP
#define NUM_TRACKED_WINDOWS 2
#define NUM_LOAD_INDICES 1000

struct hmp_sched_stats {
	int nr_big_tasks;
	u64 cumulative_runnable_avg;
	u64 pred_demands_sum;
};

struct load_subtractions {
	u64 window_start;
	u64 subs;
	u64 new_subs;
};

struct group_cpu_time {
	u64 curr_runnable_sum;
	u64 prev_runnable_sum;
	u64 nt_curr_runnable_sum;
	u64 nt_prev_runnable_sum;
};

struct sched_cluster {
	raw_spinlock_t load_lock;
	struct list_head list;
	struct cpumask cpus;
	int id;
	int max_power_cost;
	int min_power_cost;
	int max_possible_capacity;
	int capacity;
	int efficiency; /* Differentiate cpus with different IPC capability */
	int load_scale_factor;
	unsigned int exec_scale_factor;
	/*
	 * max_freq = user maximum
	 * max_mitigated_freq = thermal defined maximum
	 * max_possible_freq = maximum supported by hardware
	 */
	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
	unsigned int max_possible_freq;
	bool freq_init_done;
	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
	unsigned int static_cluster_pwr_cost;
	int notifier_sent;
	bool wake_up_idle;
};

struct cpu_cycle {
	u64 cycles;
	u64 time;
};

extern unsigned int sched_disable_window_stats;
#endif /* CONFIG_SCHED_HMP */

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
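/*
 * Worked example (illustrative): with HZ == 1000, one jiffy is
 * NSEC_PER_SEC / HZ == 1,000,000 ns, so NS_TO_JIFFIES(5000000) == 5.
 * The division truncates, so any sub-jiffy remainder is dropped.
 */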

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution on 32-bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
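/*
 * Worked example (illustrative, assuming SCHED_FIXEDPOINT_SHIFT == 10):
 * on 64-bit, NICE_0_LOAD_SHIFT == 20 and NICE_0_LOAD == 1 << 20 == 1048576.
 * The nice-0 weight sched_prio_to_weight[20] == 1024, and
 * scale_load(1024) == 1024 << 10 == 1 << 20, satisfying the identity above.
 * On 32-bit, scale_load() is a no-op and NICE_0_LOAD == 1024.
 */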

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)
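/*
 * Worked arithmetic (illustrative): deadline parameters are expressed in
 * nanoseconds, so DL_SCALE == 10 corresponds to a granularity of
 * 1 << 10 == 1024 ns (just above 1us), while DL_SCALE == 9 would give
 * 1 << 9 == 512 ns (just above 0.5us).
 */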

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
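/*
 * Note: dl_time_before(a, b) compares the u64 deadlines as (s64)(a - b) < 0,
 * so the ordering remains correct even if the clock values wrap. The entity
 * with the earlier absolute deadline wins, per EDF.
 */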

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
	unsigned int rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - the dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It can in turn be changed by writing to that same control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
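/*
 * Worked example (illustrative; assumes the 2^20 fixed-point scale used by
 * to_ratio()): a task with runtime 5ms per 10ms period contributes
 * new_bw == 0.5 << 20. On a 4-CPU root domain with dl_b->bw == 0.95 << 20
 * and total_bw == 3.5 << 20 already allocated, 0.95 * 4 == 3.8 is less than
 * 3.5 + 0.5 == 4.0, so __dl_overflow() reports an overflow and admission
 * is refused. A dl_b->bw of -1 disables admission control altogether.
 */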

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_SCHED_HMP
	bool upmigrate_discouraged;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
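/*
 * Illustrative sketch (not part of the original source): a visitor that
 * counts task groups, run under rcu_read_lock(). A non-zero return from a
 * visitor aborts the walk; tg_nop() below serves as a no-op callback.
 *
 *	static int tg_count_one(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count_one, tg_nop, &n);
 *	rcu_read_unlock();
 */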

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

extern struct task_group *css_tg(struct cgroup_subsys_state *css);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SCHED_WALT
	u64 cumulative_runnable_avg;
#endif

#ifdef CONFIG_CFS_BANDWIDTH
#ifdef CONFIG_SCHED_HMP
	struct hmp_sched_stats hmp_stats;
#endif

	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

struct max_cpu_capacity {
	raw_spinlock_t lock;
	unsigned long val;
	int cpu;
};

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/* Indicate one or more cpus over-utilized (tipping point) */
	bool overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	/* Maximum cpu capacity in the system. */
	struct max_cpu_capacity max_cpu_capacity;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned int misfit_task;
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif

#ifdef CONFIG_CPU_QUIET
	/* time-based average load */
	u64 nr_last_stamp;
	u64 nr_running_integral;
	seqcount_t ave_seqcnt;
#endif

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct task_struct *push_task;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_SCHED_HMP
	struct sched_cluster *cluster;
	struct cpumask freq_domain_cpumask;
	struct hmp_sched_stats hmp_stats;

	int cstate, wakeup_latency, wakeup_energy;
	u64 window_start;
	u64 load_reported_window;
	unsigned long hmp_flags;

	u64 cur_irqload;
	u64 avg_irqload;
	u64 irqload_ts;
	unsigned int static_cpu_pwr_cost;
	struct task_struct *ed_task;
	struct cpu_cycle cc;
	u64 old_busy_time, old_busy_time_group;
	u64 old_estimated_time;
	u64 curr_runnable_sum;
	u64 prev_runnable_sum;
	u64 nt_curr_runnable_sum;
	u64 nt_prev_runnable_sum;
	struct group_cpu_time grp_time;
	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
	DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
			NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
	u8 *top_tasks[NUM_TRACKED_WINDOWS];
	u8 curr_table;
	int prev_top;
	int curr_top;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
	int idle_state_idx;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}
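/*
 * Intended flow (a sketch inferred from the flag names above): paths such
 * as sched_yield() set RQCF_REQ_SKIP to *request* that the next clock
 * update be skipped; the request is promoted to RQCF_ACT_SKIP around
 * __schedule(), where update_rq_clock() honours it, so at most one
 * update is elided.
 */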

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
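/*
 * Illustrative sketch (not part of the original source): a scheduling class
 * typically owns one callback head per CPU and queues deferred balancing
 * work from under the rq lock; the callback is invoked later, when it is
 * safe to do heavier lock juggling (cf. the push/pull callbacks in rt.c).
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_balance_head);
 *
 *	static void my_balance_fn(struct rq *rq)
 *	{
 *		// push or pull tasks here
 *	}
 *
 *	queue_balance_callback(rq, &per_cpu(my_balance_head, rq->cpu),
 *			       my_balance_fn);
 *
 * The head->next test above is a guard against queueing a head that is
 * already pending.
 */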

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
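/*
 * Usage note (illustrative): the per-cpu domain pointers declared below are
 * derived from these helpers when the domain tree is (re)built, e.g. the
 * last-level-cache domain as
 *
 *	highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES)
 *
 * and the first NUMA level as lowest_flag_domain(cpu, SD_NUMA).
 */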

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
DECLARE_PER_CPU(struct sched_domain *, sd_ea);
DECLARE_PER_CPU(struct sched_domain *, sd_scs);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long capacity;
	unsigned long max_capacity; /* Max per-cpu capacity in group */
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;
	const struct sched_group_energy *sge;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

enum sched_boost_policy {
	SCHED_BOOST_NONE,
	SCHED_BOOST_ON_BIG,
	SCHED_BOOST_ON_ALL,
};

/*
 * Returns the rq capacity of any rq in a group. This does not play
 * well with groups where rq capacity can change independently.
 */
#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))

#ifdef CONFIG_CGROUP_SCHED
| 1108 | /* |
| 1109 | * Return the group to which this tasks belongs. |
| 1110 | * |
Tejun Heo | 8af01f5 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 1111 | * We cannot use task_css() and friends because the cgroup subsystem |
| 1112 | * changes that value before the cgroup_subsys::attach() method is called; |
| 1113 | * therefore we cannot pin it and might observe the wrong value. |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 1114 | * |
| 1115 | * The same is true for autogroup's p->signal->autogroup->tg, the autogroup |
| 1116 | * core changes this before calling sched_move_task(). |
| 1117 | * |
| 1118 | * Instead we use a 'copy' which is updated from sched_move_task() while |
| 1119 | * holding both task_struct::pi_lock and rq::lock. |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1120 | */ |
| 1121 | static inline struct task_group *task_group(struct task_struct *p) |
| 1122 | { |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 1123 | return p->sched_task_group; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1124 | } |
| 1125 | |
| 1126 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ |
| 1127 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) |
| 1128 | { |
| 1129 | #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) |
| 1130 | struct task_group *tg = task_group(p); |
| 1131 | #endif |
| 1132 | |
| 1133 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Byungchul Park | ad936d8 | 2015-10-24 01:16:19 +0900 | [diff] [blame] | 1134 | set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1135 | p->se.cfs_rq = tg->cfs_rq[cpu]; |
| 1136 | p->se.parent = tg->se[cpu]; |
| 1137 | #endif |
| 1138 | |
| 1139 | #ifdef CONFIG_RT_GROUP_SCHED |
| 1140 | p->rt.rt_rq = tg->rt_rq[cpu]; |
| 1141 | p->rt.parent = tg->rt_se[cpu]; |
| 1142 | #endif |
| 1143 | } |
| 1144 | |
| 1145 | #else /* CONFIG_CGROUP_SCHED */ |
| 1146 | |
| 1147 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } |
| 1148 | static inline struct task_group *task_group(struct task_struct *p) |
| 1149 | { |
| 1150 | return NULL; |
| 1151 | } |
| 1152 | |
| 1153 | #endif /* CONFIG_CGROUP_SCHED */ |
| 1154 | |
| 1155 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1156 | { |
| 1157 | set_task_rq(p, cpu); |
| 1158 | #ifdef CONFIG_SMP |
| 1159 | /* |
| 1160 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be |
| 1161 | * successfully executed on another CPU. We must ensure that updates of |
| 1162 | * per-task data have been completed by this moment. |
| 1163 | */ |
| 1164 | smp_wmb(); |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1165 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1166 | p->cpu = cpu; |
| 1167 | #else |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1168 | task_thread_info(p)->cpu = cpu; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1169 | #endif |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1170 | p->wake_cpu = cpu; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1171 | #endif |
| 1172 | } |
| 1173 | |
| 1174 | /* |
| 1175 | * Tunables that become constants when CONFIG_SCHED_DEBUG is off: |
| 1176 | */ |
| 1177 | #ifdef CONFIG_SCHED_DEBUG |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1178 | # include <linux/static_key.h> |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1179 | # define const_debug __read_mostly |
| 1180 | #else |
| 1181 | # define const_debug const |
| 1182 | #endif |
| 1183 | |
| 1184 | extern const_debug unsigned int sysctl_sched_features; |
| 1185 | |
| 1186 | #define SCHED_FEAT(name, enabled) \ |
| 1187 | __SCHED_FEAT_##name , |
| 1188 | |
| 1189 | enum { |
Peter Zijlstra | 391e43d | 2011-11-15 17:14:39 +0100 | [diff] [blame] | 1190 | #include "features.h" |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1191 | __SCHED_FEAT_NR, |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1192 | }; |
| 1193 | |
| 1194 | #undef SCHED_FEAT |
| 1195 | |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1196 | #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1197 | #define SCHED_FEAT(name, enabled) \ |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1198 | static __always_inline bool static_branch_##name(struct static_key *key) \ |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1199 | { \ |
Jason Baron | 6e76ea8 | 2014-07-02 15:52:41 +0000 | [diff] [blame] | 1200 | return static_key_##enabled(key); \ |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1201 | } |
| 1202 | |
| 1203 | #include "features.h" |
| 1204 | |
| 1205 | #undef SCHED_FEAT |
| 1206 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1207 | extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1208 | #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) |
| 1209 | #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1210 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1211 | #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ |
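/*
 * Illustrative sketch (not part of the original header): a features.h
 * entry such as SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) expands, via the
 * machinery above, into the enum constant
 * __SCHED_FEAT_GENTLE_FAIR_SLEEPERS and, when jump labels are
 * available, a static-branch test. Callers then simply write:
 *
 *	if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *		thresh >>= 1;
 *
 * GENTLE_FAIR_SLEEPERS is a real entry in this era's features.h; the
 * surrounding usage mirrors place_entity() in fair.c.
 */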
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1212 | |
Srikar Dronamraju | 2a59572 | 2015-08-11 21:54:21 +0530 | [diff] [blame] | 1213 | extern struct static_key_false sched_numa_balancing; |
Mel Gorman | cb25176 | 2016-02-05 09:08:36 +0000 | [diff] [blame] | 1214 | extern struct static_key_false sched_schedstats; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1215 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1216 | static inline u64 global_rt_period(void) |
| 1217 | { |
| 1218 | return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
| 1219 | } |
| 1220 | |
| 1221 | static inline u64 global_rt_runtime(void) |
| 1222 | { |
| 1223 | if (sysctl_sched_rt_runtime < 0) |
| 1224 | return RUNTIME_INF; |
| 1225 | |
| 1226 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
| 1227 | } |
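/*
 * Worked example (illustrative): with the usual defaults of
 * sysctl_sched_rt_period = 1000000 (us) and sysctl_sched_rt_runtime =
 * 950000 (us), global_rt_period() returns 1000000000 ns and
 * global_rt_runtime() returns 950000000 ns, i.e. RT tasks may consume
 * at most 95% of every one-second period. Writing -1 to
 * sysctl_sched_rt_runtime lifts the limit entirely (RUNTIME_INF).
 */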
| 1228 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1229 | static inline int task_current(struct rq *rq, struct task_struct *p) |
| 1230 | { |
| 1231 | return rq->curr == p; |
| 1232 | } |
| 1233 | |
| 1234 | static inline int task_running(struct rq *rq, struct task_struct *p) |
| 1235 | { |
| 1236 | #ifdef CONFIG_SMP |
| 1237 | return p->on_cpu; |
| 1238 | #else |
| 1239 | return task_current(rq, p); |
| 1240 | #endif |
| 1241 | } |
| 1242 | |
Kirill Tkhai | da0c1e6 | 2014-08-20 13:47:32 +0400 | [diff] [blame] | 1243 | static inline int task_on_rq_queued(struct task_struct *p) |
| 1244 | { |
| 1245 | return p->on_rq == TASK_ON_RQ_QUEUED; |
| 1246 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1247 | |
Kirill Tkhai | cca26e8 | 2014-08-20 13:47:42 +0400 | [diff] [blame] | 1248 | static inline int task_on_rq_migrating(struct task_struct *p) |
| 1249 | { |
| 1250 | return p->on_rq == TASK_ON_RQ_MIGRATING; |
| 1251 | } |
| 1252 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1253 | #ifndef prepare_arch_switch |
| 1254 | # define prepare_arch_switch(next) do { } while (0) |
| 1255 | #endif |
Catalin Marinas | 01f23e1 | 2011-11-27 21:43:10 +0000 | [diff] [blame] | 1256 | #ifndef finish_arch_post_lock_switch |
| 1257 | # define finish_arch_post_lock_switch() do { } while (0) |
| 1258 | #endif |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1259 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1260 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
| 1261 | { |
| 1262 | #ifdef CONFIG_SMP |
| 1263 | /* |
| 1264 | * We can optimise this out completely for !SMP, because the |
| 1265 | * SMP rebalancing from interrupt is the only thing that cares |
| 1266 | * here. |
| 1267 | */ |
| 1268 | next->on_cpu = 1; |
| 1269 | #endif |
| 1270 | } |
| 1271 | |
| 1272 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
| 1273 | { |
| 1274 | #ifdef CONFIG_SMP |
| 1275 | /* |
| 1276 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
| 1277 | * We must ensure this doesn't happen until the switch is completely |
| 1278 | * finished. |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1279 | * |
Peter Zijlstra | b75a225 | 2015-10-06 14:36:17 +0200 | [diff] [blame] | 1280 | * In particular, the load of prev->state in finish_task_switch() must |
| 1281 | * happen before this. |
| 1282 | * |
Peter Zijlstra | 1f03e8d | 2016-04-04 10:57:12 +0200 | [diff] [blame] | 1283 | * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1284 | */ |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1285 | smp_store_release(&prev->on_cpu, 0); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1286 | #endif |
| 1287 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 1288 | /* this is a valid case when another task releases the spinlock */ |
| 1289 | rq->lock.owner = current; |
| 1290 | #endif |
| 1291 | /* |
| 1292 | * If we are tracking spinlock dependencies then we have to |
| 1293 | * fix up the runqueue lock - which gets 'carried over' from |
| 1294 | * prev into current: |
| 1295 | */ |
| 1296 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
| 1297 | |
| 1298 | raw_spin_unlock_irq(&rq->lock); |
| 1299 | } |
| 1300 | |
Li Zefan | b13095f | 2013-03-05 16:06:38 +0800 | [diff] [blame] | 1301 | /* |
| 1302 | * wake flags |
| 1303 | */ |
| 1304 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ |
| 1305 | #define WF_FORK 0x02 /* child wakeup after fork */ |
| 1306 | #define WF_MIGRATED 0x04 /* internal use, task got migrated */ |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1307 | #define WF_NO_NOTIFIER 0x08 /* do not notify governor */ |
Li Zefan | b13095f | 2013-03-05 16:06:38 +0800 | [diff] [blame] | 1308 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1309 | /* |
| 1310 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
| 1311 | * of tasks with abnormal "nice" values across CPUs the contribution that |
| 1312 | * each task makes to its run queue's load is weighted according to its |
| 1313 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
| 1314 | * scaled version of the new time slice allocation that they receive on time |
| 1315 | * slice expiry etc. |
| 1316 | */ |
| 1317 | |
| 1318 | #define WEIGHT_IDLEPRIO 3 |
| 1319 | #define WMULT_IDLEPRIO 1431655765 |
| 1320 | |
Andi Kleen | ed82b8a | 2015-11-29 20:59:43 -0800 | [diff] [blame] | 1321 | extern const int sched_prio_to_weight[40]; |
| 1322 | extern const u32 sched_prio_to_wmult[40]; |
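/*
 * Worked example (illustrative): both arrays are indexed by
 * (static_prio - MAX_RT_PRIO), i.e. nice + 20. Nice 0 therefore maps
 * to index 20 and weight 1024 (NICE_0_LOAD); each nice step scales the
 * weight by roughly 1.25x, so nice -1 is 1277 and nice 1 is 820.
 * sched_prio_to_wmult[] caches 2^32 / weight so that the hot-path
 * weight divisions become multiply-and-shift operations.
 */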
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1323 | |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1324 | /* |
| 1325 | * {de,en}queue flags: |
| 1326 | * |
| 1327 | * DEQUEUE_SLEEP - task is no longer runnable |
| 1328 | * ENQUEUE_WAKEUP - task just became runnable |
| 1329 | * |
| 1330 | * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks |
| 1331 | * are in a known state which allows modification. Such pairs |
| 1332 | * should preserve as much state as possible. |
| 1333 | * |
| 1334 | * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location |
| 1335 | * in the runqueue. |
| 1336 | * |
| 1337 | * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) |
| 1338 | * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1339 | * ENQUEUE_MIGRATED - the task was migrated during wakeup |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1340 | * |
| 1341 | */ |
| 1342 | |
| 1343 | #define DEQUEUE_SLEEP 0x01 |
| 1344 | #define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ |
| 1345 | #define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */ |
| 1346 | |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1347 | #define ENQUEUE_WAKEUP 0x01 |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1348 | #define ENQUEUE_RESTORE 0x02 |
| 1349 | #define ENQUEUE_MOVE 0x04 |
| 1350 | |
| 1351 | #define ENQUEUE_HEAD 0x08 |
| 1352 | #define ENQUEUE_REPLENISH 0x10 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1353 | #ifdef CONFIG_SMP |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1354 | #define ENQUEUE_MIGRATED 0x20 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1355 | #else |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1356 | #define ENQUEUE_MIGRATED 0x00 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1357 | #endif |
Juri Lelli | 43aac89 | 2015-06-26 12:14:23 +0100 | [diff] [blame] | 1358 | #define ENQUEUE_WAKEUP_NEW 0x40 |
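/*
 * Typical pairing (illustrative): a task whose scheduling parameters
 * or group are being changed is dequeued with DEQUEUE_SAVE |
 * DEQUEUE_MOVE and re-enqueued with the matching ENQUEUE_RESTORE |
 * ENQUEUE_MOVE, so the dequeue/enqueue pair preserves state instead of
 * acting as a real sleep/wakeup cycle.
 */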
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1359 | |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1360 | #define RETRY_TASK ((void *)-1UL) |
| 1361 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1362 | struct sched_class { |
| 1363 | const struct sched_class *next; |
| 1364 | |
| 1365 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1366 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1367 | void (*yield_task) (struct rq *rq); |
| 1368 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); |
| 1369 | |
| 1370 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
| 1371 | |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1372 | /* |
| 1373 | * It is the responsibility of the pick_next_task() method to call |
| 1374 | * put_prev_task() on the @prev task, or do something equivalent, |
| 1375 | * before returning the next task. |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1376 | * |
| 1377 | * May return RETRY_TASK when it finds a higher prio class has runnable |
| 1378 | * tasks. |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1379 | */ |
| 1380 | struct task_struct * (*pick_next_task) (struct rq *rq, |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1381 | struct task_struct *prev, |
| 1382 | struct pin_cookie cookie); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1383 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
| 1384 | |
| 1385 | #ifdef CONFIG_SMP |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1386 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
xiaofeng.yan | 5a4fd03 | 2015-09-23 14:55:59 +0800 | [diff] [blame] | 1387 | void (*migrate_task_rq)(struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1388 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1389 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
| 1390 | |
| 1391 | void (*set_cpus_allowed)(struct task_struct *p, |
| 1392 | const struct cpumask *newmask); |
| 1393 | |
| 1394 | void (*rq_online)(struct rq *rq); |
| 1395 | void (*rq_offline)(struct rq *rq); |
| 1396 | #endif |
| 1397 | |
| 1398 | void (*set_curr_task) (struct rq *rq); |
| 1399 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
| 1400 | void (*task_fork) (struct task_struct *p); |
Dario Faggioli | e6c390f | 2013-11-07 14:43:35 +0100 | [diff] [blame] | 1401 | void (*task_dead) (struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1402 | |
Kirill Tkhai | 67dfa1b | 2014-10-27 17:40:52 +0300 | [diff] [blame] | 1403 | /* |
| 1404 | * The switched_from() call is allowed to drop rq->lock, therefore we |
| 1405 | * cannot assume the switched_from/switched_to pair is serialized by |
| 1406 | * rq->lock. They are however serialized by p->pi_lock. |
| 1407 | */ |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1408 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); |
| 1409 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
| 1410 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
| 1411 | int oldprio); |
| 1412 | |
| 1413 | unsigned int (*get_rr_interval) (struct rq *rq, |
| 1414 | struct task_struct *task); |
| 1415 | |
Stanislaw Gruszka | 6e99891 | 2014-11-12 16:58:44 +0100 | [diff] [blame] | 1416 | void (*update_curr) (struct rq *rq); |
| 1417 | |
Vincent Guittot | ea86cb4 | 2016-06-17 13:38:55 +0200 | [diff] [blame] | 1418 | #define TASK_SET_GROUP 0 |
| 1419 | #define TASK_MOVE_GROUP 1 |
| 1420 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1421 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Vincent Guittot | ea86cb4 | 2016-06-17 13:38:55 +0200 | [diff] [blame] | 1422 | void (*task_change_group) (struct task_struct *p, int type); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1423 | #endif |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1424 | #ifdef CONFIG_SCHED_HMP |
| 1425 | void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p, |
| 1426 | u32 new_task_load, u32 new_pred_demand); |
| 1427 | #endif |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1428 | }; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1429 | |
Peter Zijlstra | 3f1d2a3 | 2014-02-12 10:49:30 +0100 | [diff] [blame] | 1430 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
| 1431 | { |
| 1432 | prev->sched_class->put_prev_task(rq, prev); |
| 1433 | } |
| 1434 | |
Peter Zijlstra | b2bf6c3 | 2016-09-20 22:00:38 +0200 | [diff] [blame] | 1435 | static inline void set_curr_task(struct rq *rq, struct task_struct *curr) |
| 1436 | { |
| 1437 | curr->sched_class->set_curr_task(rq); |
| 1438 | } |
| 1439 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1440 | #define sched_class_highest (&stop_sched_class) |
| 1441 | #define for_each_class(class) \ |
| 1442 | for (class = sched_class_highest; class; class = class->next) |
| 1443 | |
| 1444 | extern const struct sched_class stop_sched_class; |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1445 | extern const struct sched_class dl_sched_class; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1446 | extern const struct sched_class rt_sched_class; |
| 1447 | extern const struct sched_class fair_sched_class; |
| 1448 | extern const struct sched_class idle_sched_class; |
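/*
 * The classes above are chained through their ->next members in strict
 * priority order:
 *
 *	stop_sched_class -> dl_sched_class -> rt_sched_class ->
 *	fair_sched_class -> idle_sched_class -> NULL
 *
 * so for_each_class() walks them from highest to lowest priority.
 */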
| 1449 | |
| 1450 | |
| 1451 | #ifdef CONFIG_SMP |
| 1452 | |
Patrick Bellasi | 2178e84 | 2016-07-22 11:35:59 +0100 | [diff] [blame] | 1453 | extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc); |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1454 | extern void update_group_capacity(struct sched_domain *sd, int cpu); |
Li Zefan | b719203 | 2013-03-07 10:00:26 +0800 | [diff] [blame] | 1455 | |
Daniel Lezcano | 7caff66 | 2014-01-06 12:34:38 +0100 | [diff] [blame] | 1456 | extern void trigger_load_balance(struct rq *rq); |
Olav Haugan | 3f2cb30 | 2016-05-31 14:34:46 -0700 | [diff] [blame] | 1457 | extern void nohz_balance_clear_nohz_mask(int cpu); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1458 | |
Peter Zijlstra | c5b2803 | 2015-05-15 17:43:35 +0200 | [diff] [blame] | 1459 | extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); |
| 1460 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1461 | #endif |
| 1462 | |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1463 | #ifdef CONFIG_CPU_IDLE |
| 1464 | static inline void idle_set_state(struct rq *rq, |
| 1465 | struct cpuidle_state *idle_state) |
| 1466 | { |
| 1467 | rq->idle_state = idle_state; |
| 1468 | } |
| 1469 | |
| 1470 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1471 | { |
Peter Zijlstra | 9148a3a | 2016-09-20 22:34:51 +0200 | [diff] [blame] | 1472 | SCHED_WARN_ON(!rcu_read_lock_held()); |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1473 | return rq->idle_state; |
| 1474 | } |
Morten Rasmussen | 0691064 | 2015-01-27 13:48:07 +0000 | [diff] [blame] | 1475 | |
| 1476 | static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx) |
| 1477 | { |
| 1478 | rq->idle_state_idx = idle_state_idx; |
| 1479 | } |
| 1480 | |
| 1481 | static inline int idle_get_state_idx(struct rq *rq) |
| 1482 | { |
| 1483 | WARN_ON(!rcu_read_lock_held()); |
| 1484 | return rq->idle_state_idx; |
| 1485 | } |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1486 | #else |
| 1487 | static inline void idle_set_state(struct rq *rq, |
| 1488 | struct cpuidle_state *idle_state) |
| 1489 | { |
| 1490 | } |
| 1491 | |
| 1492 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1493 | { |
| 1494 | return NULL; |
| 1495 | } |
Morten Rasmussen | 0691064 | 2015-01-27 13:48:07 +0000 | [diff] [blame] | 1496 | |
| 1497 | static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx) |
| 1498 | { |
| 1499 | } |
| 1500 | |
| 1501 | static inline int idle_get_state_idx(struct rq *rq) |
| 1502 | { |
| 1503 | return -1; |
| 1504 | } |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1505 | #endif |
| 1506 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1507 | extern void sysrq_sched_debug_show(void); |
| 1508 | extern void sched_init_granularity(void); |
| 1509 | extern void update_max_interval(void); |
Juri Lelli | 1baca4c | 2013-11-07 14:43:38 +0100 | [diff] [blame] | 1510 | |
| 1511 | extern void init_sched_dl_class(void); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1512 | extern void init_sched_rt_class(void); |
| 1513 | extern void init_sched_fair_class(void); |
| 1514 | |
Kirill Tkhai | 8875125 | 2014-06-29 00:03:57 +0400 | [diff] [blame] | 1515 | extern void resched_curr(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1516 | extern void resched_cpu(int cpu); |
| 1517 | |
| 1518 | extern struct rt_bandwidth def_rt_bandwidth; |
| 1519 | extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); |
| 1520 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1521 | extern struct dl_bandwidth def_dl_bandwidth; |
| 1522 | extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1523 | extern void init_dl_task_timer(struct sched_dl_entity *dl_se); |
| 1524 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1525 | unsigned long to_ratio(u64 period, u64 runtime); |
| 1526 | |
Yuyang Du | 540247f | 2015-07-15 08:04:39 +0800 | [diff] [blame] | 1527 | extern void init_entity_runnable_average(struct sched_entity *se); |
Yuyang Du | 2b8c41d | 2016-03-30 04:30:56 +0800 | [diff] [blame] | 1528 | extern void post_init_entity_util_avg(struct sched_entity *se); |
Alex Shi | a75cdaa | 2013-06-20 10:18:47 +0800 | [diff] [blame] | 1529 | |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1530 | #ifdef CONFIG_NO_HZ_FULL |
| 1531 | extern bool sched_can_stop_tick(struct rq *rq); |
| 1532 | |
| 1533 | /* |
| 1534 | * Tick may be needed by tasks in the runqueue depending on their policy and |
| 1535 | * requirements. If the tick is needed, let's send the target an IPI to kick it out of |
| 1536 | * nohz mode if necessary. |
| 1537 | */ |
| 1538 | static inline void sched_update_tick_dependency(struct rq *rq) |
| 1539 | { |
| 1540 | int cpu; |
| 1541 | |
| 1542 | if (!tick_nohz_full_enabled()) |
| 1543 | return; |
| 1544 | |
| 1545 | cpu = cpu_of(rq); |
| 1546 | |
| 1547 | if (!tick_nohz_full_cpu(cpu)) |
| 1548 | return; |
| 1549 | |
| 1550 | if (sched_can_stop_tick(rq)) |
| 1551 | tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); |
| 1552 | else |
| 1553 | tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); |
| 1554 | } |
| 1555 | #else |
| 1556 | static inline void sched_update_tick_dependency(struct rq *rq) { } |
| 1557 | #endif |
| 1558 | |
Joseph Lo | 7750186 | 2013-04-22 14:39:18 +0800 | [diff] [blame] | 1559 | static inline void __add_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1560 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1561 | unsigned prev_nr = rq->nr_running; |
| 1562 | |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1563 | sched_update_nr_prod(cpu_of(rq), count, true); |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1564 | rq->nr_running = prev_nr + count; |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1565 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1566 | if (prev_nr < 2 && rq->nr_running >= 2) { |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1567 | #ifdef CONFIG_SMP |
| 1568 | if (!rq->rd->overload) |
| 1569 | rq->rd->overload = true; |
| 1570 | #endif |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1571 | } |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1572 | |
| 1573 | sched_update_tick_dependency(rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1574 | } |
| 1575 | |
Joseph Lo | 7750186 | 2013-04-22 14:39:18 +0800 | [diff] [blame] | 1576 | static inline void __sub_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1577 | { |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1578 | sched_update_nr_prod(cpu_of(rq), count, false); |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1579 | rq->nr_running -= count; |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1580 | /* Check if we still need preemption */ |
| 1581 | sched_update_tick_dependency(rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1582 | } |
| 1583 | |
Joseph Lo | 7750186 | 2013-04-22 14:39:18 +0800 | [diff] [blame] | 1584 | #ifdef CONFIG_CPU_QUIET |
| 1585 | #define NR_AVE_SCALE(x) ((x) << FSHIFT) |
| 1586 | static inline u64 do_nr_running_integral(struct rq *rq) |
| 1587 | { |
| 1588 | s64 nr, deltax; |
| 1589 | u64 nr_running_integral = rq->nr_running_integral; |
| 1590 | |
| 1591 | deltax = rq->clock_task - rq->nr_last_stamp; |
| 1592 | nr = NR_AVE_SCALE(rq->nr_running); |
| 1593 | |
| 1594 | nr_running_integral += nr * deltax; |
| 1595 | |
| 1596 | return nr_running_integral; |
| 1597 | } |
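/*
 * Worked example (illustrative): NR_AVE_SCALE() converts nr_running to
 * FSHIFT (11-bit) fixed point, so three runnable tasks become
 * 3 << 11 = 6144. Integrating that scaled value over clock_task deltas
 * lets a consumer derive a time-weighted average nr_running without
 * sampling on every tick.
 */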
| 1598 | |
| 1599 | static inline void add_nr_running(struct rq *rq, unsigned count) |
| 1600 | { |
| 1601 | write_seqcount_begin(&rq->ave_seqcnt); |
| 1602 | rq->nr_running_integral = do_nr_running_integral(rq); |
| 1603 | rq->nr_last_stamp = rq->clock_task; |
| 1604 | __add_nr_running(rq, count); |
| 1605 | write_seqcount_end(&rq->ave_seqcnt); |
| 1606 | } |
| 1607 | |
| 1608 | static inline void sub_nr_running(struct rq *rq, unsigned count) |
| 1609 | { |
| 1610 | write_seqcount_begin(&rq->ave_seqcnt); |
| 1611 | rq->nr_running_integral = do_nr_running_integral(rq); |
| 1612 | rq->nr_last_stamp = rq->clock_task; |
| 1613 | __sub_nr_running(rq, count); |
| 1614 | write_seqcount_end(&rq->ave_seqcnt); |
| 1615 | } |
| 1616 | #else |
| 1617 | #define add_nr_running __add_nr_running |
| 1618 | #define sub_nr_running __sub_nr_running |
| 1619 | #endif |
| 1620 | |
Frederic Weisbecker | 265f22a | 2013-05-03 03:39:05 +0200 | [diff] [blame] | 1621 | static inline void rq_last_tick_reset(struct rq *rq) |
| 1622 | { |
| 1623 | #ifdef CONFIG_NO_HZ_FULL |
| 1624 | rq->last_sched_tick = jiffies; |
| 1625 | #endif |
| 1626 | } |
| 1627 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1628 | extern void update_rq_clock(struct rq *rq); |
| 1629 | |
| 1630 | extern void activate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1631 | extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1632 | |
| 1633 | extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); |
| 1634 | |
| 1635 | extern const_debug unsigned int sysctl_sched_time_avg; |
| 1636 | extern const_debug unsigned int sysctl_sched_nr_migrate; |
| 1637 | extern const_debug unsigned int sysctl_sched_migration_cost; |
| 1638 | |
| 1639 | static inline u64 sched_avg_period(void) |
| 1640 | { |
| 1641 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1642 | } |
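/*
 * Worked example (illustrative): with the default sysctl_sched_time_avg
 * of 1000 (ms), sched_avg_period() returns 500000000 ns, i.e. the
 * rt_avg accounting below decays over half-second periods.
 */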
| 1643 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1644 | #ifdef CONFIG_SCHED_HRTICK |
| 1645 | |
| 1646 | /* |
| 1647 | * Use hrtick when: |
| 1648 | * - enabled by features |
| 1649 | * - hrtimer is actually high res |
| 1650 | */ |
| 1651 | static inline int hrtick_enabled(struct rq *rq) |
| 1652 | { |
| 1653 | if (!sched_feat(HRTICK)) |
| 1654 | return 0; |
| 1655 | if (!cpu_active(cpu_of(rq))) |
| 1656 | return 0; |
| 1657 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1658 | } |
| 1659 | |
| 1660 | void hrtick_start(struct rq *rq, u64 delay); |
| 1661 | |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 1662 | #else |
| 1663 | |
| 1664 | static inline int hrtick_enabled(struct rq *rq) |
| 1665 | { |
| 1666 | return 0; |
| 1667 | } |
| 1668 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1669 | #endif /* CONFIG_SCHED_HRTICK */ |
| 1670 | |
| 1671 | #ifdef CONFIG_SMP |
| 1672 | extern void sched_avg_update(struct rq *rq); |
Peter Zijlstra | dfbca41 | 2015-03-23 14:19:05 +0100 | [diff] [blame] | 1673 | |
| 1674 | #ifndef arch_scale_freq_capacity |
| 1675 | static __always_inline |
| 1676 | unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu) |
| 1677 | { |
| 1678 | return SCHED_CAPACITY_SCALE; |
| 1679 | } |
| 1680 | #endif |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1681 | |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1682 | #ifndef arch_scale_cpu_capacity |
| 1683 | static __always_inline |
| 1684 | unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) |
| 1685 | { |
Dietmar Eggemann | e3279a2 | 2015-08-15 00:04:41 +0100 | [diff] [blame] | 1686 | if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1)) |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1687 | return sd->smt_gain / sd->span_weight; |
| 1688 | |
| 1689 | return SCHED_CAPACITY_SCALE; |
| 1690 | } |
| 1691 | #endif |
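/*
 * Worked example (illustrative): on a two-thread SMT core with the
 * default sd->smt_gain of 1178, each hardware thread reports a
 * capacity of 1178 / 2 = 589, a bit more than half of
 * SCHED_CAPACITY_SCALE (1024), reflecting that two busy siblings
 * together outperform a single one.
 */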
| 1692 | |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1693 | #ifdef CONFIG_SMP |
| 1694 | static inline unsigned long capacity_of(int cpu) |
| 1695 | { |
| 1696 | return cpu_rq(cpu)->cpu_capacity; |
| 1697 | } |
| 1698 | |
| 1699 | static inline unsigned long capacity_orig_of(int cpu) |
| 1700 | { |
| 1701 | return cpu_rq(cpu)->cpu_capacity_orig; |
| 1702 | } |
| 1703 | |
Srivatsa Vaddagiri | 26c2154 | 2016-05-31 09:08:38 -0700 | [diff] [blame] | 1704 | extern unsigned int sysctl_sched_use_walt_cpu_util; |
| 1705 | extern unsigned int walt_ravg_window; |
| 1706 | extern unsigned int walt_disabled; |
| 1707 | |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1708 | /* |
| 1709 | * cpu_util returns the amount of capacity of a CPU that is used by CFS |
| 1710 | * tasks. The unit of the return value must be the one of capacity so we can |
| 1711 | * compare the utilization with the capacity of the CPU that is available for |
| 1712 | * CFS tasks (i.e. cpu_capacity). |
| 1713 | * |
| 1714 | * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the |
| 1715 | * recent utilization of currently non-runnable tasks on a CPU. It represents |
| 1716 | * the amount of utilization of a CPU in the range [0..capacity_orig] where |
| 1717 | * capacity_orig is the cpu_capacity available at the highest frequency |
| 1718 | * (arch_scale_freq_capacity()). |
| 1719 | * The utilization of a CPU converges towards a sum equal to or less than the |
| 1720 | * current capacity (capacity_curr <= capacity_orig) of the CPU because it is |
| 1721 | * the running time on this CPU scaled by capacity_curr. |
| 1722 | * |
| 1723 | * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even |
| 1724 | * higher than capacity_orig because of unfortunate rounding in |
| 1725 | * cfs.avg.util_avg or just after migrating tasks and new task wakeups until |
| 1726 | * the average stabilizes with the new running time. We need to check that the |
| 1727 | * utilization stays within the range of [0..capacity_orig] and cap it if |
| 1728 | * necessary. Without utilization capping, a group could be seen as overloaded |
| 1729 | * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of |
| 1730 | * available capacity. We allow utilization to overshoot capacity_curr (but not |
| 1731 | * capacity_orig) as it is useful for predicting the capacity required after task |
| 1732 | * migrations (scheduler-driven DVFS). |
| 1733 | */ |
| 1734 | static inline unsigned long __cpu_util(int cpu, int delta) |
| 1735 | { |
| 1736 | unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg; |
| 1737 | unsigned long capacity = capacity_orig_of(cpu); |
| 1738 | |
Srivatsa Vaddagiri | 26c2154 | 2016-05-31 09:08:38 -0700 | [diff] [blame] | 1739 | #ifdef CONFIG_SCHED_WALT |
Amit Pundir | 102f7f4 | 2016-08-24 11:52:17 +0530 | [diff] [blame] | 1740 | if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { |
| 1741 | util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT; |
Channagoud Kadabi | 8810e5f | 2017-02-17 16:01:05 -0800 | [diff] [blame] | 1742 | do_div(util, walt_ravg_window); |
Amit Pundir | 102f7f4 | 2016-08-24 11:52:17 +0530 | [diff] [blame] | 1743 | } |
Srivatsa Vaddagiri | 26c2154 | 2016-05-31 09:08:38 -0700 | [diff] [blame] | 1744 | #endif |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1745 | delta += util; |
| 1746 | if (delta < 0) |
| 1747 | return 0; |
| 1748 | |
| 1749 | return (delta >= capacity) ? capacity : delta; |
| 1750 | } |
| 1751 | |
| 1752 | static inline unsigned long cpu_util(int cpu) |
| 1753 | { |
| 1754 | return __cpu_util(cpu, 0); |
| 1755 | } |
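/*
 * Usage sketch (illustrative; spare_capacity() is a hypothetical
 * helper): the delta argument of __cpu_util() allows "what if"
 * queries, e.g. projecting a task's utilization onto a candidate CPU
 * to estimate the spare capacity left after placing it there:
 */
#if 0
static inline unsigned long spare_capacity(int cpu, int task_util)
{
	/* __cpu_util() caps the result at capacity_orig_of(cpu) */
	unsigned long util = __cpu_util(cpu, task_util);
	unsigned long cap = capacity_of(cpu);

	return cap > util ? cap - util : 0;
}
#endif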
| 1756 | |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1757 | #endif |
| 1758 | |
Michael Turquette | 3b6188e | 2015-06-30 12:45:48 +0100 | [diff] [blame] | 1759 | #ifdef CONFIG_CPU_FREQ_GOV_SCHED |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1760 | #define capacity_max SCHED_CAPACITY_SCALE |
Michael Turquette | 3b6188e | 2015-06-30 12:45:48 +0100 | [diff] [blame] | 1761 | extern unsigned int capacity_margin; |
| 1762 | extern struct static_key __sched_freq; |
| 1763 | |
| 1764 | static inline bool sched_freq(void) |
| 1765 | { |
| 1766 | return static_key_false(&__sched_freq); |
| 1767 | } |
| 1768 | |
| 1769 | DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs); |
| 1770 | void update_cpu_capacity_request(int cpu, bool request); |
| 1771 | |
| 1772 | static inline void set_cfs_cpu_capacity(int cpu, bool request, |
| 1773 | unsigned long capacity) |
| 1774 | { |
Patrick Bellasi | c4eef1f | 2016-06-30 15:00:41 +0100 | [diff] [blame] | 1775 | struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu); |
| 1776 | |
| 1777 | #ifdef CONFIG_SCHED_WALT |
| 1778 | if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { |
| 1779 | int rtdl = scr->rt + scr->dl; |
| 1780 | /* |
| 1781 | * WALT tracks the utilization of a CPU considering the load |
| 1782 | * generated by all the scheduling classes. |
| 1783 | * Since the subsequent call to update_cpu_capacity() already adds |
| 1784 | * the RT and DL utilizations, let's remove those contributions |
| 1785 | * from the WALT signal. |
| 1787 | */ |
| 1788 | if (capacity > rtdl) |
| 1789 | capacity -= rtdl; |
| 1790 | else |
| 1791 | capacity = 0; |
| 1792 | } |
| 1793 | #endif |
| 1794 | if (scr->cfs != capacity) { |
| 1795 | scr->cfs = capacity; |
Michael Turquette | 3b6188e | 2015-06-30 12:45:48 +0100 | [diff] [blame] | 1796 | update_cpu_capacity_request(cpu, request); |
| 1797 | } |
| 1798 | } |
| 1799 | |
| 1800 | static inline void set_rt_cpu_capacity(int cpu, bool request, |
| 1801 | unsigned long capacity) |
| 1802 | { |
| 1803 | if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) { |
| 1804 | per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity; |
| 1805 | update_cpu_capacity_request(cpu, request); |
| 1806 | } |
| 1807 | } |
| 1808 | |
| 1809 | static inline void set_dl_cpu_capacity(int cpu, bool request, |
| 1810 | unsigned long capacity) |
| 1811 | { |
| 1812 | if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) { |
| 1813 | per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity; |
| 1814 | update_cpu_capacity_request(cpu, request); |
| 1815 | } |
| 1816 | } |
| 1817 | #else |
| 1818 | static inline bool sched_freq(void) { return false; } |
| 1819 | static inline void set_cfs_cpu_capacity(int cpu, bool request, |
| 1820 | unsigned long capacity) |
| 1821 | { } |
| 1822 | static inline void set_rt_cpu_capacity(int cpu, bool request, |
| 1823 | unsigned long capacity) |
| 1824 | { } |
| 1825 | static inline void set_dl_cpu_capacity(int cpu, bool request, |
| 1826 | unsigned long capacity) |
| 1827 | { } |
| 1828 | #endif |
| 1829 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1830 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1831 | { |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1832 | rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1833 | } |
| 1834 | #else |
| 1835 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } |
| 1836 | static inline void sched_avg_update(struct rq *rq) { } |
| 1837 | #endif |
| 1838 | |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1839 | struct rq_flags { |
| 1840 | unsigned long flags; |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1841 | struct pin_cookie cookie; |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1842 | }; |
| 1843 | |
| 1844 | struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3e71a46 | 2016-04-28 16:16:33 +0200 | [diff] [blame] | 1845 | __acquires(rq->lock); |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1846 | struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1847 | __acquires(p->pi_lock) |
Peter Zijlstra | 3e71a46 | 2016-04-28 16:16:33 +0200 | [diff] [blame] | 1848 | __acquires(rq->lock); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1849 | |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1850 | static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1851 | __releases(rq->lock) |
| 1852 | { |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1853 | lockdep_unpin_lock(&rq->lock, rf->cookie); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1854 | raw_spin_unlock(&rq->lock); |
| 1855 | } |
| 1856 | |
| 1857 | static inline void |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1858 | task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1859 | __releases(rq->lock) |
| 1860 | __releases(p->pi_lock) |
| 1861 | { |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1862 | lockdep_unpin_lock(&rq->lock, rf->cookie); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1863 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1864 | raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1865 | } |
| 1866 | |
Patrick Bellasi | d248900 | 2016-07-28 18:44:40 +0100 | [diff] [blame] | 1867 | extern struct rq *lock_rq_of(struct task_struct *p, struct rq_flags *flags); |
| 1868 | extern void unlock_rq_of(struct rq *rq, struct task_struct *p, struct rq_flags *flags); |
| 1869 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1870 | #ifdef CONFIG_SMP |
| 1871 | #ifdef CONFIG_PREEMPT |
| 1872 | |
| 1873 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 1874 | |
| 1875 | /* |
| 1876 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 1877 | * way at the expense of forcing extra atomic operations in all |
| 1878 | * invocations. This assures that the double_lock is acquired using the |
| 1879 | * same underlying policy as the spinlock_t on this architecture, which |
| 1880 | * reduces latency compared to the unfair variant below. However, it |
| 1881 | * also adds more overhead and therefore may reduce throughput. |
| 1882 | */ |
| 1883 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1884 | __releases(this_rq->lock) |
| 1885 | __acquires(busiest->lock) |
| 1886 | __acquires(this_rq->lock) |
| 1887 | { |
| 1888 | raw_spin_unlock(&this_rq->lock); |
| 1889 | double_rq_lock(this_rq, busiest); |
| 1890 | |
| 1891 | return 1; |
| 1892 | } |
| 1893 | |
| 1894 | #else |
| 1895 | /* |
| 1896 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 1897 | * latency by eliminating extra atomic operations when the locks are |
| 1898 | * already in proper order on entry. This favors lower cpu-ids and will |
| 1899 | * grant the double lock to lower cpus over higher ids under contention, |
| 1900 | * regardless of entry order into the function. |
| 1901 | */ |
| 1902 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1903 | __releases(this_rq->lock) |
| 1904 | __acquires(busiest->lock) |
| 1905 | __acquires(this_rq->lock) |
| 1906 | { |
| 1907 | int ret = 0; |
| 1908 | |
| 1909 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
| 1910 | if (busiest < this_rq) { |
| 1911 | raw_spin_unlock(&this_rq->lock); |
| 1912 | raw_spin_lock(&busiest->lock); |
| 1913 | raw_spin_lock_nested(&this_rq->lock, |
| 1914 | SINGLE_DEPTH_NESTING); |
| 1915 | ret = 1; |
| 1916 | } else |
| 1917 | raw_spin_lock_nested(&busiest->lock, |
| 1918 | SINGLE_DEPTH_NESTING); |
| 1919 | } |
| 1920 | return ret; |
| 1921 | } |
| 1922 | |
| 1923 | #endif /* CONFIG_PREEMPT */ |
| 1924 | |
| 1925 | /* |
| 1926 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 1927 | */ |
| 1928 | static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1929 | { |
| 1930 | if (unlikely(!irqs_disabled())) { |
| 1931 | /* printk() doesn't work well under rq->lock */ |
| 1932 | raw_spin_unlock(&this_rq->lock); |
| 1933 | BUG_ON(1); |
| 1934 | } |
| 1935 | |
| 1936 | return _double_lock_balance(this_rq, busiest); |
| 1937 | } |
| 1938 | |
| 1939 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 1940 | __releases(busiest->lock) |
| 1941 | { |
Todd Kjos | a31778a | 2016-07-04 15:04:45 +0100 | [diff] [blame] | 1942 | if (this_rq != busiest) |
| 1943 | raw_spin_unlock(&busiest->lock); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1944 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 1945 | } |
| 1946 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1947 | static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
| 1948 | { |
| 1949 | if (l1 > l2) |
| 1950 | swap(l1, l2); |
| 1951 | |
| 1952 | spin_lock(l1); |
| 1953 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1954 | } |
| 1955 | |
Mike Galbraith | 60e69ee | 2014-04-07 10:55:15 +0200 | [diff] [blame] | 1956 | static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) |
| 1957 | { |
| 1958 | if (l1 > l2) |
| 1959 | swap(l1, l2); |
| 1960 | |
| 1961 | spin_lock_irq(l1); |
| 1962 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1963 | } |
| 1964 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1965 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
| 1966 | { |
| 1967 | if (l1 > l2) |
| 1968 | swap(l1, l2); |
| 1969 | |
| 1970 | raw_spin_lock(l1); |
| 1971 | raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1972 | } |
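/*
 * Note on the three helpers above: each imposes a global order (lower
 * address first) before nesting the second lock. Two CPUs locking the
 * same pair from opposite ends both take min(l1, l2) first, so one of
 * them waits instead of forming an ABBA deadlock cycle.
 */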
| 1973 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1974 | /* |
| 1975 | * double_rq_lock - safely lock two runqueues |
| 1976 | * |
| 1977 | * Note this does not disable interrupts like task_rq_lock, |
| 1978 | * you need to do so manually before calling. |
| 1979 | */ |
| 1980 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1981 | __acquires(rq1->lock) |
| 1982 | __acquires(rq2->lock) |
| 1983 | { |
| 1984 | BUG_ON(!irqs_disabled()); |
| 1985 | if (rq1 == rq2) { |
| 1986 | raw_spin_lock(&rq1->lock); |
| 1987 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1988 | } else { |
| 1989 | if (rq1 < rq2) { |
| 1990 | raw_spin_lock(&rq1->lock); |
| 1991 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 1992 | } else { |
| 1993 | raw_spin_lock(&rq2->lock); |
| 1994 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 1995 | } |
| 1996 | } |
| 1997 | } |
| 1998 | |
| 1999 | /* |
| 2000 | * double_rq_unlock - safely unlock two runqueues |
| 2001 | * |
| 2002 | * Note this does not restore interrupts like task_rq_unlock, |
| 2003 | * you need to do so manually after calling. |
| 2004 | */ |
| 2005 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 2006 | __releases(rq1->lock) |
| 2007 | __releases(rq2->lock) |
| 2008 | { |
| 2009 | raw_spin_unlock(&rq1->lock); |
| 2010 | if (rq1 != rq2) |
| 2011 | raw_spin_unlock(&rq2->lock); |
| 2012 | else |
| 2013 | __release(rq2->lock); |
| 2014 | } |
| 2015 | |
| 2016 | #else /* CONFIG_SMP */ |
| 2017 | |
| 2018 | /* |
| 2019 | * double_rq_lock - safely lock two runqueues |
| 2020 | * |
| 2021 | * Note this does not disable interrupts like task_rq_lock, |
| 2022 | * you need to do so manually before calling. |
| 2023 | */ |
| 2024 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 2025 | __acquires(rq1->lock) |
| 2026 | __acquires(rq2->lock) |
| 2027 | { |
| 2028 | BUG_ON(!irqs_disabled()); |
| 2029 | BUG_ON(rq1 != rq2); |
| 2030 | raw_spin_lock(&rq1->lock); |
| 2031 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 2032 | } |
| 2033 | |
| 2034 | /* |
| 2035 | * double_rq_unlock - safely unlock two runqueues |
| 2036 | * |
| 2037 | * Note this does not restore interrupts like task_rq_unlock, |
| 2038 | * you need to do so manually after calling. |
| 2039 | */ |
| 2040 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 2041 | __releases(rq1->lock) |
| 2042 | __releases(rq2->lock) |
| 2043 | { |
| 2044 | BUG_ON(rq1 != rq2); |
| 2045 | raw_spin_unlock(&rq1->lock); |
| 2046 | __release(rq2->lock); |
| 2047 | } |
| 2048 | |
| 2049 | #endif |
| 2050 | |
| 2051 | extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); |
| 2052 | extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 2053 | |
| 2054 | #ifdef CONFIG_SCHED_DEBUG |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2055 | extern void print_cfs_stats(struct seq_file *m, int cpu); |
| 2056 | extern void print_rt_stats(struct seq_file *m, int cpu); |
Wanpeng Li | acb3213 | 2014-10-31 06:39:33 +0800 | [diff] [blame] | 2057 | extern void print_dl_stats(struct seq_file *m, int cpu); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 2058 | extern void |
| 2059 | print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 397f237 | 2015-06-25 22:51:43 +0530 | [diff] [blame] | 2060 | |
| 2061 | #ifdef CONFIG_NUMA_BALANCING |
| 2062 | extern void |
| 2063 | show_numa_stats(struct task_struct *p, struct seq_file *m); |
| 2064 | extern void |
| 2065 | print_numa_stats(struct seq_file *m, int node, unsigned long tsf, |
| 2066 | unsigned long tpf, unsigned long gsf, unsigned long gpf); |
| 2067 | #endif /* CONFIG_NUMA_BALANCING */ |
| 2068 | #endif /* CONFIG_SCHED_DEBUG */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2069 | |
| 2070 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
Abel Vesa | 07c54f7 | 2015-03-03 13:50:27 +0200 | [diff] [blame] | 2071 | extern void init_rt_rq(struct rt_rq *rt_rq); |
| 2072 | extern void init_dl_rq(struct dl_rq *dl_rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2073 | |
Ben Segall | 1ee14e6 | 2013-10-16 11:16:12 -0700 | [diff] [blame] | 2074 | extern void cfs_bandwidth_usage_inc(void); |
| 2075 | extern void cfs_bandwidth_usage_dec(void); |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2076 | |
Frederic Weisbecker | 3451d02 | 2011-08-10 23:21:01 +0200 | [diff] [blame] | 2077 | #ifdef CONFIG_NO_HZ_COMMON |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2078 | enum rq_nohz_flag_bits { |
| 2079 | NOHZ_TICK_STOPPED, |
| 2080 | NOHZ_BALANCE_KICK, |
| 2081 | }; |
| 2082 | |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 2083 | #define NOHZ_KICK_ANY 0 |
| 2084 | #define NOHZ_KICK_RESTRICT 1 |
| 2085 | |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2086 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
Thomas Gleixner | 20a5c8c | 2016-03-10 12:54:20 +0100 | [diff] [blame] | 2087 | |
| 2088 | extern void nohz_balance_exit_idle(unsigned int cpu); |
| 2089 | #else |
| 2090 | static inline void nohz_balance_exit_idle(unsigned int cpu) { } |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2091 | #endif |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2092 | |
| 2093 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2094 | struct irqtime { |
| 2095 | u64 hardirq_time; |
| 2096 | u64 softirq_time; |
| 2097 | u64 irq_start_time; |
| 2098 | struct u64_stats_sync sync; |
| 2099 | }; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2100 | |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2101 | DECLARE_PER_CPU(struct irqtime, cpu_irqtime); |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2102 | |
| 2103 | static inline u64 irq_time_read(int cpu) |
| 2104 | { |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2105 | struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); |
| 2106 | unsigned int seq; |
| 2107 | u64 total; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2108 | |
| 2109 | do { |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2110 | seq = __u64_stats_fetch_begin(&irqtime->sync); |
| 2111 | total = irqtime->softirq_time + irqtime->hardirq_time; |
| 2112 | } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2113 | |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2114 | return total; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2115 | } |
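/*
 * Writer-side sketch (illustrative; the real accounting lives in
 * irqtime_account_irq()): updates are bracketed by the u64_stats
 * seqcount so the fetch/retry loop in irq_time_read() above always
 * observes a consistent hardirq/softirq pair:
 */
#if 0
static void account_hardirq_time(u64 delta)	/* hypothetical helper */
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);

	u64_stats_update_begin(&irqtime->sync);
	irqtime->hardirq_time += delta;
	u64_stats_update_end(&irqtime->sync);
}
#endif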
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2116 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2117 | |
| 2118 | #ifdef CONFIG_CPU_FREQ |
| 2119 | DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); |
| 2120 | |
| 2121 | /** |
| 2122 | * cpufreq_update_util - Take a note about CPU utilization changes. |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2123 | * @rq: Runqueue to carry out the update for. |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2124 | * @flags: Update reason flags. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2125 | * |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2126 | * This function is called by the scheduler on the CPU whose utilization is |
| 2127 | * being updated. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2128 | * |
| 2129 | * It can only be called from RCU-sched read-side critical sections. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2130 | * |
| 2131 | * The way cpufreq is currently arranged requires it to evaluate the CPU |
| 2132 | * performance state (frequency/voltage) on a regular basis to prevent it from |
| 2133 | * being stuck in a completely inadequate performance level for too long. |
| 2134 | * That is not guaranteed to happen if the updates are only triggered from CFS, |
| 2135 | * though, because they may not be coming in if RT or deadline tasks are active |
| 2136 | * all the time (or there are RT and DL tasks only). |
| 2137 | * |
| 2138 | * As a workaround for that issue, this function is called by the RT and DL |
| 2139 | * sched classes to trigger extra cpufreq updates to prevent it from stalling, |
| 2140 | * but that really is a band-aid. Going forward it should be replaced with |
| 2141 | * solutions targeted more specifically at RT and DL tasks. |
| 2142 | */ |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2143 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2144 | { |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2145 | struct update_util_data *data; |
| 2146 | |
Vikram Mulukutla | 4b54aae | 2017-03-20 13:41:37 -0700 | [diff] [blame^] | 2147 | #ifdef CONFIG_SCHED_HMP |
| 2148 | /* |
| 2149 | * Skip if we've already reported, but not if this is an inter-cluster |
| 2150 | * migration |
| 2151 | */ |
| 2152 | if (!sched_disable_window_stats && |
| 2153 | (rq->load_reported_window == rq->window_start) && |
| 2154 | !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)) |
| 2155 | return; |
| 2156 | rq->load_reported_window = rq->window_start; |
| 2157 | #endif |
| 2158 | |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2159 | data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)); |
| 2160 | if (data) |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2161 | data->func(data, rq_clock(rq), flags); |
| 2162 | } |
| 2163 | |
| 2164 | static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) |
| 2165 | { |
| 2166 | if (cpu_of(rq) == smp_processor_id()) |
| 2167 | cpufreq_update_util(rq, flags); |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2168 | } |
| 2169 | #else |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2170 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} |
| 2171 | static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {} |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2172 | #endif /* CONFIG_CPU_FREQ */ |
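/*
 * Registration sketch (illustrative; the hook and data names are
 * hypothetical): a cpufreq governor publishes its callback through
 * cpufreq_add_update_util_hook() from kernel/sched/cpufreq.c, after
 * which cpufreq_update_util() above invokes it on every utilization
 * change:
 */
#if 0
static void my_util_hook(struct update_util_data *data, u64 time,
			 unsigned int flags)
{
	/* e.g. re-evaluate the frequency for this CPU */
}

static DEFINE_PER_CPU(struct update_util_data, my_util_data);

static void my_util_hook_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(my_util_data, cpu),
				     my_util_hook);
}
#endif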
Linus Torvalds | be53f58 | 2016-03-24 09:42:50 -0700 | [diff] [blame] | 2173 | |
Rafael J. Wysocki | 9bdcb44 | 2016-04-02 01:09:12 +0200 | [diff] [blame] | 2174 | #ifdef arch_scale_freq_capacity |
| 2175 | #ifndef arch_scale_freq_invariant |
| 2176 | #define arch_scale_freq_invariant() (true) |
| 2177 | #endif |
| 2178 | #else /* arch_scale_freq_capacity */ |
| 2179 | #define arch_scale_freq_invariant() (false) |
| 2180 | #endif |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2181 | |
| 2182 | #ifdef CONFIG_SCHED_HMP |
| 2183 | |
| 2184 | static inline int cluster_first_cpu(struct sched_cluster *cluster) |
| 2185 | { |
| 2186 | return cpumask_first(&cluster->cpus); |
| 2187 | } |
| 2188 | |
| 2189 | struct related_thread_group { |
| 2190 | int id; |
| 2191 | raw_spinlock_t lock; |
| 2192 | struct list_head tasks; |
| 2193 | struct list_head list; |
| 2194 | struct sched_cluster *preferred_cluster; |
| 2195 | struct rcu_head rcu; |
| 2196 | u64 last_update; |
| 2197 | }; |
| 2198 | |
| 2199 | extern struct list_head cluster_head; |
| 2200 | extern int num_clusters; |
| 2201 | extern struct sched_cluster *sched_cluster[NR_CPUS]; |
| 2202 | |
| 2203 | #define for_each_sched_cluster(cluster) \ |
| 2204 | list_for_each_entry_rcu(cluster, &cluster_head, list) |
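
/*
 * Illustrative helper (hypothetical, not in the original source):
 * cluster_head is an RCU-protected list, so any walk of it with
 * for_each_sched_cluster() must sit inside an RCU read-side
 * critical section.
 */
static inline int example_count_clusters(void)
{
	struct sched_cluster *cluster;
	int count = 0;

	rcu_read_lock();
	for_each_sched_cluster(cluster)
		count++;
	rcu_read_unlock();

	return count;
}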
| 2205 | |
| 2206 | #define WINDOW_STATS_RECENT 0 |
| 2207 | #define WINDOW_STATS_MAX 1 |
| 2208 | #define WINDOW_STATS_MAX_RECENT_AVG 2 |
| 2209 | #define WINDOW_STATS_AVG 3 |
| 2210 | #define WINDOW_STATS_INVALID_POLICY 4 |
| 2211 | |
| 2212 | #define SCHED_UPMIGRATE_MIN_NICE 15 |
| 2213 | #define EXITING_TASK_MARKER 0xdeaddead |
| 2214 | |
| 2215 | #define UP_MIGRATION 1 |
| 2216 | #define DOWN_MIGRATION 2 |
| 2217 | #define IRQLOAD_MIGRATION 3 |
| 2218 | |
| 2219 | extern struct mutex policy_mutex; |
| 2220 | extern unsigned int sched_ravg_window; |
| 2221 | extern unsigned int sched_disable_window_stats; |
| 2222 | extern unsigned int max_possible_freq; |
| 2223 | extern unsigned int min_max_freq; |
| 2224 | extern unsigned int pct_task_load(struct task_struct *p); |
| 2225 | extern unsigned int max_possible_efficiency; |
| 2226 | extern unsigned int min_possible_efficiency; |
| 2227 | extern unsigned int max_capacity; |
| 2228 | extern unsigned int min_capacity; |
| 2229 | extern unsigned int max_load_scale_factor; |
| 2230 | extern unsigned int max_possible_capacity; |
| 2231 | extern unsigned int min_max_possible_capacity; |
| 2232 | extern unsigned int max_power_cost; |
| 2233 | extern unsigned int sched_init_task_load_windows; |
| 2234 | extern unsigned int up_down_migrate_scale_factor; |
| 2235 | extern unsigned int sysctl_sched_restrict_cluster_spill; |
| 2236 | extern unsigned int sched_pred_alert_load; |
| 2237 | extern struct sched_cluster init_cluster; |
| 2238 | extern unsigned int __read_mostly sched_short_sleep_task_threshold; |
| 2239 | extern unsigned int __read_mostly sched_long_cpu_selection_threshold; |
| 2240 | extern unsigned int __read_mostly sched_big_waker_task_load; |
| 2241 | extern unsigned int __read_mostly sched_small_wakee_task_load; |
| 2242 | extern unsigned int __read_mostly sched_spill_load; |
| 2243 | extern unsigned int __read_mostly sched_upmigrate; |
| 2244 | extern unsigned int __read_mostly sched_downmigrate; |
| 2245 | extern unsigned int __read_mostly sysctl_sched_spill_nr_run; |
| 2246 | extern unsigned int __read_mostly sched_load_granule; |
| 2247 | |
| 2248 | extern void init_new_task_load(struct task_struct *p, bool idle_task); |
| 2249 | extern u64 sched_ktime_clock(void); |
| 2250 | extern int got_boost_kick(void); |
| 2251 | extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb); |
| 2252 | extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event, |
| 2253 | u64 wallclock, u64 irqtime); |
| 2254 | extern bool early_detection_notify(struct rq *rq, u64 wallclock); |
| 2255 | extern void clear_ed_task(struct task_struct *p, struct rq *rq); |
| 2256 | extern void fixup_busy_time(struct task_struct *p, int new_cpu); |
| 2257 | extern void clear_boost_kick(int cpu); |
| 2258 | extern void clear_hmp_request(int cpu); |
| 2259 | extern void mark_task_starting(struct task_struct *p); |
| 2260 | extern void set_window_start(struct rq *rq); |
| 2261 | extern void update_cluster_topology(void); |
| 2262 | extern void note_task_waking(struct task_struct *p, u64 wallclock); |
| 2263 | extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock); |
| 2264 | extern void init_clusters(void); |
| 2265 | extern void reset_cpu_hmp_stats(int cpu, int reset_cra); |
| 2266 | extern unsigned int max_task_load(void); |
| 2267 | extern void sched_account_irqtime(int cpu, struct task_struct *curr, |
| 2268 | u64 delta, u64 wallclock); |
| 2269 | extern void sched_account_irqstart(int cpu, struct task_struct *curr, |
| 2270 | u64 wallclock); |
| 2271 | extern unsigned int cpu_temp(int cpu); |
| 2272 | extern unsigned int nr_eligible_big_tasks(int cpu); |
| 2273 | extern int update_preferred_cluster(struct related_thread_group *grp, |
| 2274 | struct task_struct *p, u32 old_load); |
| 2275 | extern void set_preferred_cluster(struct related_thread_group *grp); |
| 2276 | extern void add_new_task_to_grp(struct task_struct *new); |
| 2277 | extern unsigned int update_freq_aggregate_threshold(unsigned int threshold); |
| 2278 | extern void update_avg_burst(struct task_struct *p); |
| 2279 | extern void update_avg(u64 *avg, u64 sample); |
| 2280 | |
| 2281 | #define NO_BOOST 0 |
| 2282 | #define FULL_THROTTLE_BOOST 1 |
| 2283 | #define CONSERVATIVE_BOOST 2 |
| 2284 | #define RESTRAINED_BOOST 3 |
| 2285 | |
| 2286 | static inline struct sched_cluster *cpu_cluster(int cpu) |
| 2287 | { |
| 2288 | return cpu_rq(cpu)->cluster; |
| 2289 | } |
| 2290 | |
| 2291 | static inline int cpu_capacity(int cpu) |
| 2292 | { |
| 2293 | return cpu_rq(cpu)->cluster->capacity; |
| 2294 | } |
| 2295 | |
| 2296 | static inline int cpu_max_possible_capacity(int cpu) |
| 2297 | { |
| 2298 | return cpu_rq(cpu)->cluster->max_possible_capacity; |
| 2299 | } |
| 2300 | |
| 2301 | static inline int cpu_load_scale_factor(int cpu) |
| 2302 | { |
| 2303 | return cpu_rq(cpu)->cluster->load_scale_factor; |
| 2304 | } |
| 2305 | |
| 2306 | static inline int cpu_efficiency(int cpu) |
| 2307 | { |
| 2308 | return cpu_rq(cpu)->cluster->efficiency; |
| 2309 | } |
| 2310 | |
| 2311 | static inline unsigned int cpu_cur_freq(int cpu) |
| 2312 | { |
| 2313 | return cpu_rq(cpu)->cluster->cur_freq; |
| 2314 | } |
| 2315 | |
| 2316 | static inline unsigned int cpu_min_freq(int cpu) |
| 2317 | { |
| 2318 | return cpu_rq(cpu)->cluster->min_freq; |
| 2319 | } |
| 2320 | |
| 2321 | static inline unsigned int cluster_max_freq(struct sched_cluster *cluster) |
| 2322 | { |
| 2323 | /* |
| 2324 | * The governor and the thermal driver don't know about each other's |
| 2325 | * mitigation votes, so struct sched_cluster stores both and the min() |
| 2326 | * of the two is returned as the current cluster fmax. |
| 2327 | */ |
| 2328 | return min(cluster->max_mitigated_freq, cluster->max_freq); |
| 2329 | } |
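
/*
 * Example with illustrative numbers: if the user allows max_freq of
 * 2208000 kHz but thermal has capped max_mitigated_freq at 1804800 kHz,
 * cluster_max_freq() reports 1804800 kHz as the effective fmax.
 */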
| 2330 | |
| 2331 | static inline unsigned int cpu_max_freq(int cpu) |
| 2332 | { |
| 2333 | return cluster_max_freq(cpu_rq(cpu)->cluster); |
| 2334 | } |
| 2335 | |
| 2336 | static inline unsigned int cpu_max_possible_freq(int cpu) |
| 2337 | { |
| 2338 | return cpu_rq(cpu)->cluster->max_possible_freq; |
| 2339 | } |
| 2340 | |
| 2341 | static inline int same_cluster(int src_cpu, int dst_cpu) |
| 2342 | { |
| 2343 | return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster; |
| 2344 | } |
| 2345 | |
| 2346 | static inline int cpu_max_power_cost(int cpu) |
| 2347 | { |
| 2348 | return cpu_rq(cpu)->cluster->max_power_cost; |
| 2349 | } |
| 2350 | |
| 2351 | static inline int cpu_min_power_cost(int cpu) |
| 2352 | { |
| 2353 | return cpu_rq(cpu)->cluster->min_power_cost; |
| 2354 | } |
| 2355 | |
| 2356 | static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period) |
| 2357 | { |
| 2358 | return div64_u64(cycles, period); |
| 2359 | } |
| 2360 | |
| 2361 | static inline bool hmp_capable(void) |
| 2362 | { |
| 2363 | return max_possible_capacity != min_max_possible_capacity; |
| 2364 | } |
| 2365 | |
| 2366 | /* |
| 2367 | * 'load' is expressed relative to the "best cpu" running at its best |
| 2368 | * frequency. Scale it for the given cpu, accounting for how much less |
| 2369 | * capable that cpu is than the "best cpu". |
| 2370 | */ |
| 2371 | static inline u64 scale_load_to_cpu(u64 task_load, int cpu) |
| 2372 | { |
| 2373 | u64 lsf = cpu_load_scale_factor(cpu); |
| 2374 | |
| 2375 | if (lsf != 1024) { |
| 2376 | task_load *= lsf; |
| 2377 | task_load /= 1024; |
| 2378 | } |
| 2379 | |
| 2380 | return task_load; |
| 2381 | } |
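
/*
 * Example with illustrative numbers: a cpu with load_scale_factor 2048
 * is half as capable as the "best cpu" (lsf 1024), so a task load of
 * 100 scales to 100 * 2048 / 1024 = 200 on that cpu.
 */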
| 2382 | |
| 2383 | static inline unsigned int task_load(struct task_struct *p) |
| 2384 | { |
| 2385 | return p->ravg.demand; |
| 2386 | } |
| 2387 | |
| 2388 | static inline void |
| 2389 | inc_cumulative_runnable_avg(struct hmp_sched_stats *stats, |
| 2390 | struct task_struct *p) |
| 2391 | { |
| 2392 | u32 task_load; |
| 2393 | |
| 2394 | if (sched_disable_window_stats) |
| 2395 | return; |
| 2396 | |
| 2397 | task_load = p->ravg.demand; |
| 2398 | |
| 2399 | stats->cumulative_runnable_avg += task_load; |
| 2400 | stats->pred_demands_sum += p->ravg.pred_demand; |
| 2401 | } |
| 2402 | |
| 2403 | static inline void |
| 2404 | dec_cumulative_runnable_avg(struct hmp_sched_stats *stats, |
| 2405 | struct task_struct *p) |
| 2406 | { |
| 2407 | u32 task_load; |
| 2408 | |
| 2409 | if (sched_disable_window_stats) |
| 2410 | return; |
| 2411 | |
| 2412 | task_load = p->ravg.demand; |
| 2413 | |
| 2414 | stats->cumulative_runnable_avg -= task_load; |
| 2415 | |
| 2416 | BUG_ON((s64)stats->cumulative_runnable_avg < 0); |
| 2417 | |
| 2418 | stats->pred_demands_sum -= p->ravg.pred_demand; |
| 2419 | BUG_ON((s64)stats->pred_demands_sum < 0); |
| 2420 | } |
| 2421 | |
| 2422 | static inline void |
| 2423 | fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats, |
| 2424 | struct task_struct *p, s64 task_load_delta, |
| 2425 | s64 pred_demand_delta) |
| 2426 | { |
| 2427 | if (sched_disable_window_stats) |
| 2428 | return; |
| 2429 | |
| 2430 | stats->cumulative_runnable_avg += task_load_delta; |
| 2431 | BUG_ON((s64)stats->cumulative_runnable_avg < 0); |
| 2432 | |
| 2433 | stats->pred_demands_sum += pred_demand_delta; |
| 2434 | BUG_ON((s64)stats->pred_demands_sum < 0); |
| 2435 | } |
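
/*
 * Sketch (hypothetical helper, not in the original source) of the
 * intended use of the fixup path: when a task's demand is re-evaluated
 * while it stays queued, pass the signed deltas instead of doing a
 * dec/inc pair, mirroring the PRED_DEMAND_DELTA pattern below.
 */
static inline void
example_fixup_task_demand(struct rq *rq, struct task_struct *p,
			  u32 new_demand, u32 new_pred_demand)
{
	fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
				      (s64)new_demand - p->ravg.demand,
				      (s64)new_pred_demand -
				      p->ravg.pred_demand);
}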
| 2436 | |
| 2437 | #define pct_to_real(tunable) \ |
| 2438 | (div64_u64((u64)(tunable) * (u64)max_task_load(), 100)) |
| 2439 | |
| 2440 | #define real_to_pct(tunable) \ |
| 2441 | (div64_u64((u64)(tunable) * (u64)100, (u64)max_task_load())) |
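
/*
 * Example with an illustrative max_task_load() of 100000:
 * pct_to_real(20) == 20000 and real_to_pct(20000) == 20.
 */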
| 2442 | |
| 2443 | #define SCHED_HIGH_IRQ_TIMEOUT 3 |
| 2444 | static inline u64 sched_irqload(int cpu) |
| 2445 | { |
| 2446 | struct rq *rq = cpu_rq(cpu); |
| 2447 | s64 delta; |
| 2448 | |
| 2449 | delta = get_jiffies_64() - rq->irqload_ts; |
| 2450 | /* |
| 2451 | * The current context can be preempted by an irq, and rq->irqload_ts |
| 2452 | * can be updated from irq context, so delta may be negative. That is |
| 2453 | * fine: a negative delta means an irq occurred very recently, so the |
| 2454 | * average irq load is still valid to return. |
| 2455 | */ |
| 2456 | |
| 2457 | if (delta < SCHED_HIGH_IRQ_TIMEOUT) |
| 2458 | return rq->avg_irqload; |
| 2459 | else |
| 2460 | return 0; |
| 2461 | } |
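
/*
 * Note: the timeout above is in jiffies, so the "recent irq" window is
 * HZ-dependent; with HZ == 100 (illustrative), irq load observed within
 * the last ~30ms is still reported.
 */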
| 2462 | |
| 2463 | static inline int sched_cpu_high_irqload(int cpu) |
| 2464 | { |
| 2465 | return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload; |
| 2466 | } |
| 2467 | |
| 2468 | static inline bool task_in_related_thread_group(struct task_struct *p) |
| 2469 | { |
| 2470 | return rcu_access_pointer(p->grp) != NULL; |
| 2471 | } |
| 2472 | |
| 2473 | static inline |
| 2474 | struct related_thread_group *task_related_thread_group(struct task_struct *p) |
| 2475 | { |
| 2476 | return rcu_dereference(p->grp); |
| 2477 | } |
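
/*
 * Illustrative usage (not from the original source): the returned
 * pointer is only valid inside an RCU read-side critical section, e.g.:
 *
 *	rcu_read_lock();
 *	grp = task_related_thread_group(p);
 *	if (grp)
 *		set_preferred_cluster(grp);
 *	rcu_read_unlock();
 */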
| 2478 | |
| 2479 | #define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand) |
| 2480 | |
| 2481 | extern void |
| 2482 | check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups); |
| 2483 | |
| 2484 | extern void notify_migration(int src_cpu, int dest_cpu, |
| 2485 | bool src_cpu_dead, struct task_struct *p); |
| 2486 | |
| 2487 | /* Are the frequencies of two cpus synchronized with each other? */ |
| 2488 | static inline int same_freq_domain(int src_cpu, int dst_cpu) |
| 2489 | { |
| 2490 | struct rq *rq = cpu_rq(src_cpu); |
| 2491 | |
| 2492 | if (src_cpu == dst_cpu) |
| 2493 | return 1; |
| 2494 | |
| 2495 | return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask); |
| 2496 | } |
| 2497 | |
| 2498 | #define BOOST_KICK 0 |
| 2499 | #define CPU_RESERVED 1 |
| 2500 | |
| 2501 | static inline int is_reserved(int cpu) |
| 2502 | { |
| 2503 | struct rq *rq = cpu_rq(cpu); |
| 2504 | |
| 2505 | return test_bit(CPU_RESERVED, &rq->hmp_flags); |
| 2506 | } |
| 2507 | |
| 2508 | static inline int mark_reserved(int cpu) |
| 2509 | { |
| 2510 | struct rq *rq = cpu_rq(cpu); |
| 2511 | |
| 2512 | /* TODO: settle on boost_flags vs. hmp_flags as the field name. */ |
| 2513 | return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags); |
| 2514 | } |
| 2515 | |
| 2516 | static inline void clear_reserved(int cpu) |
| 2517 | { |
| 2518 | struct rq *rq = cpu_rq(cpu); |
| 2519 | |
| 2520 | clear_bit(CPU_RESERVED, &rq->hmp_flags); |
| 2521 | } |
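
/*
 * Sketch of the intended reservation protocol (an assumption drawn from
 * the helpers above, not spelled out here): mark_reserved() returns the
 * previous bit value, so a zero return means the caller won the
 * reservation:
 *
 *	if (!mark_reserved(cpu)) {
 *		...queue hypothetical migration work on cpu...
 *		clear_reserved(cpu);
 *	}
 */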
| 2522 | |
| 2523 | static inline u64 cpu_cravg_sync(int cpu, int sync) |
| 2524 | { |
| 2525 | struct rq *rq = cpu_rq(cpu); |
| 2526 | u64 load; |
| 2527 | |
| 2528 | load = rq->hmp_stats.cumulative_runnable_avg; |
| 2529 | |
| 2530 | /* |
| 2531 | * If load is being checked in a sync wakeup environment, |
| 2532 | * we may want to discount the load of the currently running |
| 2533 | * task. |
| 2534 | */ |
| 2535 | if (sync && cpu == smp_processor_id()) { |
| 2536 | if (load > rq->curr->ravg.demand) |
| 2537 | load -= rq->curr->ravg.demand; |
| 2538 | else |
| 2539 | load = 0; |
| 2540 | } |
| 2541 | |
| 2542 | return load; |
| 2543 | } |
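
/*
 * Illustrative helper (hypothetical, not in the original source):
 * wakeup placement can compare candidate cpus by their sync-adjusted
 * cumulative runnable average.
 */
static inline int example_least_loaded_cpu(const struct cpumask *cpus,
					   int sync)
{
	int cpu, best_cpu = cpumask_first(cpus);
	u64 load, best_load = U64_MAX;

	for_each_cpu(cpu, cpus) {
		load = cpu_cravg_sync(cpu, sync);
		if (load < best_load) {
			best_load = load;
			best_cpu = cpu;
		}
	}

	return best_cpu;
}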
| 2544 | |
| 2545 | static inline bool is_short_burst_task(struct task_struct *p) |
| 2546 | { |
| 2547 | return p->ravg.avg_burst < sysctl_sched_short_burst && |
| 2548 | p->ravg.avg_sleep_time > sysctl_sched_short_sleep; |
| 2549 | } |
| 2550 | |
| 2551 | extern void check_for_migration(struct rq *rq, struct task_struct *p); |
| 2552 | extern void pre_big_task_count_change(const struct cpumask *cpus); |
| 2553 | extern void post_big_task_count_change(const struct cpumask *cpus); |
| 2554 | extern void set_hmp_defaults(void); |
| 2555 | extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost); |
| 2556 | extern unsigned int power_cost(int cpu, u64 demand); |
| 2557 | extern void reset_all_window_stats(u64 window_start, unsigned int window_size); |
| 2558 | extern int sched_boost(void); |
| 2559 | extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu, |
| 2560 | enum sched_boost_policy boost_policy); |
| 2561 | extern enum sched_boost_policy sched_boost_policy(void); |
| 2562 | extern int task_will_fit(struct task_struct *p, int cpu); |
| 2563 | extern u64 cpu_load(int cpu); |
| 2564 | extern u64 cpu_load_sync(int cpu, int sync); |
| 2565 | extern int preferred_cluster(struct sched_cluster *cluster, |
| 2566 | struct task_struct *p); |
| 2567 | extern void inc_nr_big_task(struct hmp_sched_stats *stats, |
| 2568 | struct task_struct *p); |
| 2569 | extern void dec_nr_big_task(struct hmp_sched_stats *stats, |
| 2570 | struct task_struct *p); |
| 2571 | extern void inc_rq_hmp_stats(struct rq *rq, |
| 2572 | struct task_struct *p, int change_cra); |
| 2573 | extern void dec_rq_hmp_stats(struct rq *rq, |
| 2574 | struct task_struct *p, int change_cra); |
| 2575 | extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra); |
| 2576 | extern int is_big_task(struct task_struct *p); |
| 2577 | extern int upmigrate_discouraged(struct task_struct *p); |
| 2578 | extern struct sched_cluster *rq_cluster(struct rq *rq); |
| 2579 | extern int nr_big_tasks(struct rq *rq); |
| 2580 | extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats, |
| 2581 | struct task_struct *p, s64 delta); |
| 2582 | extern void reset_task_stats(struct task_struct *p); |
| 2583 | extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra); |
| 2584 | extern void inc_hmp_sched_stats_fair(struct rq *rq, |
| 2585 | struct task_struct *p, int change_cra); |
| 2586 | extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css, |
| 2587 | struct cftype *cft); |
| 2588 | extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css, |
| 2589 | struct cftype *cft, u64 upmigrate_discourage); |
| 2590 | extern void sched_boost_parse_dt(void); |
| 2591 | extern void clear_top_tasks_bitmap(unsigned long *bitmap); |
| 2592 | |
| 2593 | #if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE) |
| 2594 | extern bool task_sched_boost(struct task_struct *p); |
| 2595 | extern int sync_cgroup_colocation(struct task_struct *p, bool insert); |
| 2596 | extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2); |
| 2597 | extern void update_cgroup_boost_settings(void); |
| 2598 | extern void restore_cgroup_boost_settings(void); |
| 2599 | |
| 2600 | #else |
| 2601 | static inline bool |
| 2602 | same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2) |
| 2603 | { |
| 2604 | return true; |
| 2605 | } |
| 2606 | |
| 2607 | static inline bool task_sched_boost(struct task_struct *p) |
| 2608 | { |
| 2609 | return true; |
| 2610 | } |
| 2611 | |
| 2612 | static inline void update_cgroup_boost_settings(void) { } |
| 2613 | static inline void restore_cgroup_boost_settings(void) { } |
| 2614 | #endif |
| 2615 | |
| 2616 | extern int alloc_related_thread_groups(void); |
| 2617 | |
| 2618 | extern unsigned long all_cluster_ids[]; |
| 2619 | |
| 2620 | #else /* CONFIG_SCHED_HMP */ |
| 2621 | |
| 2622 | struct hmp_sched_stats; |
| 2623 | struct related_thread_group; |
| 2624 | struct sched_cluster; |
| 2625 | |
| 2626 | static inline enum sched_boost_policy sched_boost_policy(void) |
| 2627 | { |
| 2628 | return SCHED_BOOST_NONE; |
| 2629 | } |
| 2630 | |
| 2631 | static inline bool task_sched_boost(struct task_struct *p) |
| 2632 | { |
| 2633 | return true; |
| 2634 | } |
| 2635 | |
| 2636 | static inline int got_boost_kick(void) |
| 2637 | { |
| 2638 | return 0; |
| 2639 | } |
| 2640 | |
| 2641 | static inline void update_task_ravg(struct task_struct *p, struct rq *rq, |
| 2642 | int event, u64 wallclock, u64 irqtime) { } |
| 2643 | |
| 2644 | static inline bool early_detection_notify(struct rq *rq, u64 wallclock) |
| 2645 | { |
| 2646 | return false; |
| 2647 | } |
| 2648 | |
| 2649 | static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { } |
| 2650 | static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } |
| 2651 | static inline void clear_boost_kick(int cpu) { } |
| 2652 | static inline void clear_hmp_request(int cpu) { } |
| 2653 | static inline void mark_task_starting(struct task_struct *p) { } |
| 2654 | static inline void set_window_start(struct rq *rq) { } |
| 2655 | static inline void init_clusters(void) {} |
| 2656 | static inline void update_cluster_topology(void) { } |
| 2657 | static inline void note_task_waking(struct task_struct *p, u64 wallclock) { } |
| 2658 | static inline void set_task_last_switch_out(struct task_struct *p, |
| 2659 | u64 wallclock) { } |
| 2660 | |
| 2661 | static inline int task_will_fit(struct task_struct *p, int cpu) |
| 2662 | { |
| 2663 | return 1; |
| 2664 | } |
| 2665 | |
| 2666 | static inline unsigned int power_cost(int cpu, u64 demand) |
| 2667 | { |
| 2668 | return SCHED_CAPACITY_SCALE; |
| 2669 | } |
| 2670 | |
| 2671 | static inline int sched_boost(void) |
| 2672 | { |
| 2673 | return 0; |
| 2674 | } |
| 2675 | |
| 2676 | static inline int is_big_task(struct task_struct *p) |
| 2677 | { |
| 2678 | return 0; |
| 2679 | } |
| 2680 | |
| 2681 | static inline int nr_big_tasks(struct rq *rq) |
| 2682 | { |
| 2683 | return 0; |
| 2684 | } |
| 2685 | |
| 2686 | static inline int is_cpu_throttling_imminent(int cpu) |
| 2687 | { |
| 2688 | return 0; |
| 2689 | } |
| 2690 | |
| 2691 | static inline int is_task_migration_throttled(struct task_struct *p) |
| 2692 | { |
| 2693 | return 0; |
| 2694 | } |
| 2695 | |
| 2696 | static inline unsigned int cpu_temp(int cpu) |
| 2697 | { |
| 2698 | return 0; |
| 2699 | } |
| 2700 | |
| 2701 | static inline void |
| 2702 | inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { } |
| 2703 | |
| 2704 | static inline void |
| 2705 | dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { } |
| 2706 | |
| 2707 | static inline int |
| 2708 | preferred_cluster(struct sched_cluster *cluster, struct task_struct *p) |
| 2709 | { |
| 2710 | return 1; |
| 2711 | } |
| 2712 | |
| 2713 | static inline struct sched_cluster *rq_cluster(struct rq *rq) |
| 2714 | { |
| 2715 | return NULL; |
| 2716 | } |
| 2717 | |
| 2718 | static inline void init_new_task_load(struct task_struct *p, bool idle_task) |
| 2719 | { |
| 2720 | } |
| 2721 | |
| 2722 | static inline u64 scale_load_to_cpu(u64 load, int cpu) |
| 2723 | { |
| 2724 | return load; |
| 2725 | } |
| 2726 | |
| 2727 | static inline unsigned int nr_eligible_big_tasks(int cpu) |
| 2728 | { |
| 2729 | return 0; |
| 2730 | } |
| 2731 | |
| 2732 | static inline unsigned int pct_task_load(struct task_struct *p) { return 0; } |
| 2733 | |
| 2734 | static inline int cpu_capacity(int cpu) |
| 2735 | { |
| 2736 | return SCHED_CAPACITY_SCALE; |
| 2737 | } |
| 2738 | |
| 2739 | static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; } |
| 2740 | |
| 2741 | static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats, |
| 2742 | struct task_struct *p) |
| 2743 | { |
| 2744 | } |
| 2745 | |
| 2746 | static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats, |
| 2747 | struct task_struct *p) |
| 2748 | { |
| 2749 | } |
| 2750 | |
| 2751 | static inline void sched_account_irqtime(int cpu, struct task_struct *curr, |
| 2752 | u64 delta, u64 wallclock) |
| 2753 | { |
| 2754 | } |
| 2755 | |
| 2756 | static inline void sched_account_irqstart(int cpu, struct task_struct *curr, |
| 2757 | u64 wallclock) |
| 2758 | { |
| 2759 | } |
| 2760 | |
| 2761 | static inline int sched_cpu_high_irqload(int cpu) { return 0; } |
| 2762 | |
| 2763 | static inline void set_preferred_cluster(struct related_thread_group *grp) { } |
| 2764 | |
| 2765 | static inline bool task_in_related_thread_group(struct task_struct *p) |
| 2766 | { |
| 2767 | return false; |
| 2768 | } |
| 2769 | |
| 2770 | static inline |
| 2771 | struct related_thread_group *task_related_thread_group(struct task_struct *p) |
| 2772 | { |
| 2773 | return NULL; |
| 2774 | } |
| 2775 | |
| 2776 | static inline u32 task_load(struct task_struct *p) { return 0; } |
| 2777 | |
| 2778 | static inline int update_preferred_cluster(struct related_thread_group *grp, |
| 2779 | struct task_struct *p, u32 old_load) |
| 2780 | { |
| 2781 | return 0; |
| 2782 | } |
| 2783 | |
| 2784 | static inline void add_new_task_to_grp(struct task_struct *new) {} |
| 2785 | |
| 2786 | #define PRED_DEMAND_DELTA (0) |
| 2787 | |
| 2788 | static inline void |
| 2789 | check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { } |
| 2790 | |
| 2791 | static inline void notify_migration(int src_cpu, int dest_cpu, |
| 2792 | bool src_cpu_dead, struct task_struct *p) { } |
| 2793 | |
| 2794 | static inline int same_freq_domain(int src_cpu, int dst_cpu) |
| 2795 | { |
| 2796 | return 1; |
| 2797 | } |
| 2798 | |
| 2799 | static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } |
| 2800 | static inline void pre_big_task_count_change(const struct cpumask *cpus) { } |
| 2801 | static inline void post_big_task_count_change(const struct cpumask *cpus) { } |
| 2802 | static inline void set_hmp_defaults(void) { } |
| 2803 | |
| 2804 | static inline void clear_reserved(int cpu) { } |
| 2805 | static inline void sched_boost_parse_dt(void) {} |
| 2806 | static inline int alloc_related_thread_groups(void) { return 0; } |
| 2807 | |
| 2808 | #define trace_sched_cpu_load(...) |
| 2809 | #define trace_sched_cpu_load_lb(...) |
| 2810 | #define trace_sched_cpu_load_cgroup(...) |
| 2811 | #define trace_sched_cpu_load_wakeup(...) |
| 2812 | |
| 2813 | static inline void update_avg_burst(struct task_struct *p) {} |
| 2814 | |
| 2815 | #endif /* CONFIG_SCHED_HMP */ |