#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched_energy.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x)	((void)(x))
#endif
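
/*
 * Usage sketch (illustrative, not part of the original header): the macro
 * stringifies its condition, so a failing check prints the expression once:
 *
 *	SCHED_WARN_ON(rq->nr_running < 0);
 *
 * With CONFIG_SCHED_DEBUG=n it still evaluates the condition (for side
 * effects) but emits no warning.
 */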

struct rq;
struct cpuidle_state;

extern __read_mostly bool sched_predl;

#ifdef CONFIG_SCHED_WALT
extern unsigned int sched_ravg_window;
extern unsigned int walt_cpu_util_freq_divisor;

struct walt_sched_stats {
	int nr_big_tasks;
	u64 cumulative_runnable_avg;
	u64 pred_demands_sum;
};

struct cpu_cycle {
	u64 cycles;
	u64 time;
};

struct group_cpu_time {
	u64 curr_runnable_sum;
	u64 prev_runnable_sum;
	u64 nt_curr_runnable_sum;
	u64 nt_prev_runnable_sum;
};

struct load_subtractions {
	u64 window_start;
	u64 subs;
	u64 new_subs;
};

#define NUM_TRACKED_WINDOWS 2
#define NUM_LOAD_INDICES 1000

struct sched_cluster {
	raw_spinlock_t load_lock;
	struct list_head list;
	struct cpumask cpus;
	int id;
	int max_power_cost;
	int min_power_cost;
	int max_possible_capacity;
	int capacity;
	int efficiency; /* Differentiate cpus with different IPC capability */
	int load_scale_factor;
	unsigned int exec_scale_factor;
	/*
	 * max_freq = user maximum
	 * max_mitigated_freq = thermal defined maximum
	 * max_possible_freq = maximum supported by hardware
	 */
	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
	unsigned int max_possible_freq;
	bool freq_init_done;
	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
	unsigned int static_cluster_pwr_cost;
	int notifier_sent;
	bool wake_up_idle;
	u64 aggr_grp_load;
};

extern unsigned int sched_disable_window_stats;

extern struct timer_list sched_grp_timer;
#endif /* CONFIG_SCHED_WALT */


/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

#ifdef CONFIG_SCHED_SMT
extern void update_idle_core(struct rq *rq);
#else
static inline void update_idle_core(struct rq *rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
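
/*
 * Worked example (illustrative, assuming HZ == 250): NSEC_PER_SEC / HZ is
 * 4,000,000 ns per jiffy, so NS_TO_JIFFIES(10000000) == 2, i.e. 10 ms of
 * nanosecond timing rounds down to two jiffies.
 */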

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution on 32-bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
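
/*
 * Worked example (illustrative, not from the original source): with
 * SCHED_FIXEDPOINT_SHIFT == 10, the nice-0 weight is 1024. On 64-bit,
 * NICE_0_LOAD_SHIFT is 20, so NICE_0_LOAD == 1 << 20 == 1048576 and
 * scale_load(1024) == 1024 << 10 == 1048576, satisfying the calibration
 * identity above; scale_load_down() undoes the shift.
 */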

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF ((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}
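
/*
 * Usage sketch (hypothetical caller, for illustration only): a setscheduler
 * path can reject unknown policies up front and then branch per class:
 *
 *	if (!valid_policy(policy))
 *		return -EINVAL;
 *	if (dl_policy(policy))
 *		return setup_deadline_params(p, attr);	// hypothetical helper
 *	if (rt_policy(policy))
 *		return setup_rt_params(p, attr);	// hypothetical helper
 */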

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
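
/*
 * Example (illustrative numbers, not from the original source): under EDF
 * the earlier absolute deadline wins, so with a->deadline == 100us and
 * b->deadline == 250us (as rq-clock timestamps), dl_entity_preempt(a, b)
 * is true and @a should run first.
 */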

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. In turn, it can be changed by writing on its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
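
/*
 * Worked example (illustrative numbers, not from the original source):
 * bandwidths are fixed-point fractions of a CPU; decimal values are used
 * here for readability. Suppose dl_b->bw encodes 95% per CPU, cpus == 4,
 * dl_b->total_bw already holds 3.5 CPUs worth of -deadline bandwidth, and
 * a task asks to grow from old_bw == 0.2 to new_bw == 0.4 CPUs. Then
 * 0.95 * 4 == 3.8 is compared against 3.5 - 0.2 + 0.4 == 3.7, so
 * __dl_overflow() is false and admission control accepts the change.
 */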

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
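
/*
 * Usage sketch (hypothetical visitor, for illustration only): count every
 * task group in the hierarchy. A visitor returning non-zero aborts the walk.
 *
 *	static int tg_count_one(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr_groups = 0;
 *
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count_one, tg_nop, &nr_groups);
 *	rcu_read_unlock();
 */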

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

extern struct task_group *css_tg(struct cgroup_subsys_state *css);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
	unsigned long propagate_avg;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH

#ifdef CONFIG_SCHED_WALT
	struct walt_sched_stats walt_stats;
#endif

	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

struct max_cpu_capacity {
	raw_spinlock_t lock;
	unsigned long val;
	int cpu;
};

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/* Indicate one or more cpus over-utilized (tipping point) */
	bool overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	/* Maximum cpu capacity in the system. */
	struct max_cpu_capacity max_cpu_capacity;

	/* First cpu with maximum and minimum original capacity */
	int max_cap_orig_cpu, min_cap_orig_cpu;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned int misfit_task;
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif

#ifdef CONFIG_CPU_QUIET
	/* time-based average load */
	u64 nr_last_stamp;
	u64 nr_running_integral;
	seqcount_t ave_seqcnt;
#endif

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
	struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct task_struct *push_task;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_SCHED_WALT
	struct sched_cluster *cluster;
	struct cpumask freq_domain_cpumask;
	struct walt_sched_stats walt_stats;

	int cstate, wakeup_latency, wakeup_energy;
	u64 window_start;
	s64 cum_window_start;
	u64 load_reported_window;
	unsigned long walt_flags;

	u64 cur_irqload;
	u64 avg_irqload;
	u64 irqload_ts;
	unsigned int static_cpu_pwr_cost;
	struct task_struct *ed_task;
	struct cpu_cycle cc;
	u64 old_busy_time, old_busy_time_group;
	u64 old_estimated_time;
	u64 curr_runnable_sum;
	u64 prev_runnable_sum;
	u64 nt_curr_runnable_sum;
	u64 nt_prev_runnable_sum;
	u64 cum_window_demand;
	struct group_cpu_time grp_time;
	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
	DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
			NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
	u8 *top_tasks[NUM_TRACKED_WINDOWS];
	u8 curr_table;
	int prev_top;
	int curr_top;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#ifdef CONFIG_SMP
	struct eas_stats eas_stats;
#endif
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
	int idle_state_idx;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)
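
/*
 * Usage sketch (illustrative, not part of the original header): the
 * accessors above resolve a CPU number or task to its runqueue, e.g.
 *
 *	struct rq *rq = cpu_rq(smp_processor_id());	// == this_rq()
 *	struct rq *trq = task_rq(p);			// rq of p's CPU
 *
 * Fields such as rq->nr_running must still be read under rq->lock (or
 * with the appropriate READ_ONCE() discipline) by real callers.
 */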

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
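
/*
 * Usage sketch (hypothetical caller, for illustration only): a scheduling
 * class queues deferred balance work from under rq->lock; the callback runs
 * later, once the lock is dropped. head->next doubling as the "already
 * queued" marker makes re-queueing a no-op:
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_push_head);
 *
 *	static void my_push_tasks(struct rq *rq)	// hypothetical
 *	{
 *		// runs after rq->lock has been released
 *	}
 *	...
 *	queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu),
 *			       my_push_tasks);
 */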

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
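
/*
 * Usage sketch (illustrative, mirroring how the per-cpu sd_llc pointer
 * below is derived): walk up from the cpu's base domain to find the
 * last-level-cache span.
 *
 *	struct sched_domain *sd;
 *
 *	rcu_read_lock();
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	if (sd)
 *		;	// sched_domain_span(sd) covers the CPUs sharing the LLC
 *	rcu_read_unlock();
 *
 * Note that highest_flag_domain() assumes the flag is set bottom-up: it
 * stops at the first level that lacks @flag.
 */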
| 1007 | |
Peter Zijlstra | 518cd62 | 2011-12-07 15:07:31 +0100 | [diff] [blame] | 1008 | DECLARE_PER_CPU(struct sched_domain *, sd_llc); |
Peter Zijlstra | 7d9ffa8 | 2013-07-04 12:56:46 +0800 | [diff] [blame] | 1009 | DECLARE_PER_CPU(int, sd_llc_size); |
Peter Zijlstra | 518cd62 | 2011-12-07 15:07:31 +0100 | [diff] [blame] | 1010 | DECLARE_PER_CPU(int, sd_llc_id); |
Peter Zijlstra | 0e369d7 | 2016-05-09 10:38:01 +0200 | [diff] [blame] | 1011 | DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); |
Mel Gorman | fb13c7e | 2013-10-07 11:29:17 +0100 | [diff] [blame] | 1012 | DECLARE_PER_CPU(struct sched_domain *, sd_numa); |
Preeti U Murthy | 37dc6b5 | 2013-10-30 08:42:52 +0530 | [diff] [blame] | 1013 | DECLARE_PER_CPU(struct sched_domain *, sd_asym); |
Morten Rasmussen | 30786a0 | 2015-01-02 17:08:52 +0000 | [diff] [blame] | 1014 | DECLARE_PER_CPU(struct sched_domain *, sd_ea); |
Morten Rasmussen | 61bf625 | 2014-12-18 14:47:18 +0000 | [diff] [blame] | 1015 | DECLARE_PER_CPU(struct sched_domain *, sd_scs); |
Peter Zijlstra | 518cd62 | 2011-12-07 15:07:31 +0100 | [diff] [blame] | 1016 | |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1017 | struct sched_group_capacity { |
Li Zefan | 5e6521e | 2013-03-05 16:06:23 +0800 | [diff] [blame] | 1018 | atomic_t ref; |
| 1019 | /* |
Yuyang Du | 172895e | 2016-04-05 12:12:27 +0800 | [diff] [blame] | 1020 | * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1021 | * for a single CPU. |
Li Zefan | 5e6521e | 2013-03-05 16:06:23 +0800 | [diff] [blame] | 1022 | */ |
Morten Rasmussen | 5cdeb5f | 2016-02-25 12:43:49 +0000 | [diff] [blame] | 1023 | unsigned long capacity; |
| 1024 | unsigned long max_capacity; /* Max per-cpu capacity in group */ |
Morten Rasmussen | 3d8cb90 | 2016-10-14 14:41:09 +0100 | [diff] [blame] | 1025 | unsigned long min_capacity; /* Min per-CPU capacity in group */ |
Li Zefan | 5e6521e | 2013-03-05 16:06:23 +0800 | [diff] [blame] | 1026 | unsigned long next_update; |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1027 | int imbalance; /* XXX unrelated to capacity but shared group state */ |
Li Zefan | 5e6521e | 2013-03-05 16:06:23 +0800 | [diff] [blame] | 1028 | |
| 1029 | unsigned long cpumask[0]; /* iteration mask */ |
| 1030 | }; |
| 1031 | |
| 1032 | struct sched_group { |
| 1033 | struct sched_group *next; /* Must be a circular list */ |
| 1034 | atomic_t ref; |
| 1035 | |
| 1036 | unsigned int group_weight; |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1037 | struct sched_group_capacity *sgc; |
Greg Hackmann | 2a3c6e6 | 2017-03-07 10:37:56 -0800 | [diff] [blame] | 1038 | const struct sched_group_energy *sge; |
Li Zefan | 5e6521e | 2013-03-05 16:06:23 +0800 | [diff] [blame] | 1039 | |
| 1040 | /* |
| 1041 | * The CPUs this group covers. |
| 1042 | * |
| 1043 | * NOTE: this field is variable length. (Allocated dynamically |
| 1044 | * by attaching extra space to the end of the structure, |
| 1045 | * depending on how many CPUs the kernel has booted up with) |
| 1046 | */ |
| 1047 | unsigned long cpumask[0]; |
| 1048 | }; |
| 1049 | |
| 1050 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) |
| 1051 | { |
| 1052 | return to_cpumask(sg->cpumask); |
| 1053 | } |
| 1054 | |
| 1055 | /* |
| 1056 | * cpumask masking which cpus in the group are allowed to iterate up the domain |
| 1057 | * tree. |
| 1058 | */ |
| 1059 | static inline struct cpumask *sched_group_mask(struct sched_group *sg) |
| 1060 | { |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1061 | return to_cpumask(sg->sgc->cpumask); |
Li Zefan | 5e6521e | 2013-03-05 16:06:23 +0800 | [diff] [blame] | 1062 | } |
| 1063 | |
| 1064 | /** |
| 1065 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. |
| 1066 | * @group: The group whose first cpu is to be returned. |
| 1067 | */ |
| 1068 | static inline unsigned int group_first_cpu(struct sched_group *group) |
| 1069 | { |
| 1070 | return cpumask_first(sched_group_cpus(group)); |
| 1071 | } |
| 1072 | |
Peter Zijlstra | c117487 | 2012-05-31 14:47:33 +0200 | [diff] [blame] | 1073 | extern int group_balance_cpu(struct sched_group *sg); |
| 1074 | |
Steven Rostedt (Red Hat) | 3866e84 | 2016-02-22 16:26:51 -0500 | [diff] [blame] | 1075 | #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) |
| 1076 | void register_sched_domain_sysctl(void); |
| 1077 | void unregister_sched_domain_sysctl(void); |
| 1078 | #else |
| 1079 | static inline void register_sched_domain_sysctl(void) |
| 1080 | { |
| 1081 | } |
| 1082 | static inline void unregister_sched_domain_sysctl(void) |
| 1083 | { |
| 1084 | } |
| 1085 | #endif |
| 1086 | |
Peter Zijlstra | e3baac4 | 2014-06-04 10:31:18 -0700 | [diff] [blame] | 1087 | #else |
| 1088 | |
| 1089 | static inline void sched_ttwu_pending(void) { } |
| 1090 | |
Peter Zijlstra | 518cd62 | 2011-12-07 15:07:31 +0100 | [diff] [blame] | 1091 | #endif /* CONFIG_SMP */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1092 | |
Peter Zijlstra | 391e43d | 2011-11-15 17:14:39 +0100 | [diff] [blame] | 1093 | #include "stats.h" |
| 1094 | #include "auto_group.h" |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1095 | |
Pavankumar Kondeti | cbf7ea0 | 2017-01-11 15:11:23 +0530 | [diff] [blame] | 1096 | enum sched_boost_policy { |
| 1097 | SCHED_BOOST_NONE, |
| 1098 | SCHED_BOOST_ON_BIG, |
| 1099 | SCHED_BOOST_ON_ALL, |
| 1100 | }; |
| 1101 | |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1102 | /* |
| 1103 | * Returns the rq capacity of the first CPU in a group, standing in for the |
| 1104 | * whole group; it does not play well when rq capacities change independently. |
| 1105 | */ |
| 1106 | #define group_rq_capacity(group) cpu_capacity(group_first_cpu(group)) |
| 1107 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1108 | #ifdef CONFIG_CGROUP_SCHED |
| 1109 | |
| 1110 | /* |
| 1111 | * Return the group to which this task belongs. |
| 1112 | * |
Tejun Heo | 8af01f5 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 1113 | * We cannot use task_css() and friends because the cgroup subsystem |
| 1114 | * changes that value before the cgroup_subsys::attach() method is called, |
| 1115 | * therefore we cannot pin it and might observe the wrong value. |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 1116 | * |
| 1117 | * The same is true for autogroup's p->signal->autogroup->tg, the autogroup |
| 1118 | * core changes this before calling sched_move_task(). |
| 1119 | * |
| 1120 | * Instead we use a 'copy' which is updated from sched_move_task() while |
| 1121 | * holding both task_struct::pi_lock and rq::lock. |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1122 | */ |
| 1123 | static inline struct task_group *task_group(struct task_struct *p) |
| 1124 | { |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 1125 | return p->sched_task_group; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1126 | } |
| 1127 | |
| 1128 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ |
| 1129 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) |
| 1130 | { |
| 1131 | #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) |
| 1132 | struct task_group *tg = task_group(p); |
| 1133 | #endif |
| 1134 | |
| 1135 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Byungchul Park | ad936d8 | 2015-10-24 01:16:19 +0900 | [diff] [blame] | 1136 | set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1137 | p->se.cfs_rq = tg->cfs_rq[cpu]; |
| 1138 | p->se.parent = tg->se[cpu]; |
| 1139 | #endif |
| 1140 | |
| 1141 | #ifdef CONFIG_RT_GROUP_SCHED |
| 1142 | p->rt.rt_rq = tg->rt_rq[cpu]; |
| 1143 | p->rt.parent = tg->rt_se[cpu]; |
| 1144 | #endif |
| 1145 | } |
| 1146 | |
| 1147 | #else /* CONFIG_CGROUP_SCHED */ |
| 1148 | |
| 1149 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } |
| 1150 | static inline struct task_group *task_group(struct task_struct *p) |
| 1151 | { |
| 1152 | return NULL; |
| 1153 | } |
| 1154 | |
| 1155 | #endif /* CONFIG_CGROUP_SCHED */ |
| 1156 | |
| 1157 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1158 | { |
| 1159 | set_task_rq(p, cpu); |
| 1160 | #ifdef CONFIG_SMP |
| 1161 | /* |
| 1162 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be |
| 1163 | * successfully executed on another CPU. We must ensure that updates of |
| 1164 | * per-task data have been completed by this moment. |
| 1165 | */ |
| 1166 | smp_wmb(); |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1167 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1168 | p->cpu = cpu; |
| 1169 | #else |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1170 | task_thread_info(p)->cpu = cpu; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1171 | #endif |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1172 | p->wake_cpu = cpu; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1173 | #endif |
| 1174 | } |
| 1175 | |
| 1176 | /* |
| 1177 | * Tunables that become constants when CONFIG_SCHED_DEBUG is off: |
| 1178 | */ |
| 1179 | #ifdef CONFIG_SCHED_DEBUG |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1180 | # include <linux/static_key.h> |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1181 | # define const_debug __read_mostly |
| 1182 | #else |
| 1183 | # define const_debug const |
| 1184 | #endif |
| 1185 | |
| 1186 | extern const_debug unsigned int sysctl_sched_features; |
| 1187 | |
| 1188 | #define SCHED_FEAT(name, enabled) \ |
| 1189 | __SCHED_FEAT_##name , |
| 1190 | |
| 1191 | enum { |
Peter Zijlstra | 391e43d | 2011-11-15 17:14:39 +0100 | [diff] [blame] | 1192 | #include "features.h" |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1193 | __SCHED_FEAT_NR, |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1194 | }; |
| 1195 | |
| 1196 | #undef SCHED_FEAT |
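/*
 * For reference: each SCHED_FEAT(name, enabled) line in features.h is
 * expanded by the macro above into one enum entry. For example, the
 * GENTLE_FAIR_SLEEPERS entry
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *
 * becomes __SCHED_FEAT_GENTLE_FAIR_SLEEPERS, and __SCHED_FEAT_NR ends
 * up counting the total number of features.
 */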
| 1197 | |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1198 | #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1199 | #define SCHED_FEAT(name, enabled) \ |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1200 | static __always_inline bool static_branch_##name(struct static_key *key) \ |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1201 | { \ |
Jason Baron | 6e76ea8 | 2014-07-02 15:52:41 +0000 | [diff] [blame] | 1202 | return static_key_##enabled(key); \ |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1203 | } |
| 1204 | |
| 1205 | #include "features.h" |
| 1206 | |
| 1207 | #undef SCHED_FEAT |
| 1208 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1209 | extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1210 | #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) |
| 1211 | #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1212 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1213 | #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ |
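/*
 * Either way, callers test a feature the same way; a minimal sketch
 * (HRTICK is used exactly like this further down in this header):
 */
#if 0
	if (sched_feat(HRTICK))
		/* jump label with SCHED_DEBUG+jump labels, bit test otherwise */
		start_hrtick();	/* hypothetical caller */
#endif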
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1214 | |
Srikar Dronamraju | 2a59572 | 2015-08-11 21:54:21 +0530 | [diff] [blame] | 1215 | extern struct static_key_false sched_numa_balancing; |
Mel Gorman | cb25176 | 2016-02-05 09:08:36 +0000 | [diff] [blame] | 1216 | extern struct static_key_false sched_schedstats; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1217 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1218 | static inline u64 global_rt_period(void) |
| 1219 | { |
| 1220 | return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
| 1221 | } |
| 1222 | |
| 1223 | static inline u64 global_rt_runtime(void) |
| 1224 | { |
| 1225 | if (sysctl_sched_rt_runtime < 0) |
| 1226 | return RUNTIME_INF; |
| 1227 | |
| 1228 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
| 1229 | } |
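/*
 * With the default sysctls (sched_rt_period_us = 1000000,
 * sched_rt_runtime_us = 950000) these return 1e9 and 9.5e8 ns
 * respectively, i.e. RT tasks may consume at most 95% of each one
 * second period; writing -1 to sched_rt_runtime_us yields RUNTIME_INF
 * and disables RT throttling.
 */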
| 1230 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1231 | static inline int task_current(struct rq *rq, struct task_struct *p) |
| 1232 | { |
| 1233 | return rq->curr == p; |
| 1234 | } |
| 1235 | |
| 1236 | static inline int task_running(struct rq *rq, struct task_struct *p) |
| 1237 | { |
| 1238 | #ifdef CONFIG_SMP |
| 1239 | return p->on_cpu; |
| 1240 | #else |
| 1241 | return task_current(rq, p); |
| 1242 | #endif |
| 1243 | } |
| 1244 | |
Kirill Tkhai | da0c1e6 | 2014-08-20 13:47:32 +0400 | [diff] [blame] | 1245 | static inline int task_on_rq_queued(struct task_struct *p) |
| 1246 | { |
| 1247 | return p->on_rq == TASK_ON_RQ_QUEUED; |
| 1248 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1249 | |
Kirill Tkhai | cca26e8 | 2014-08-20 13:47:42 +0400 | [diff] [blame] | 1250 | static inline int task_on_rq_migrating(struct task_struct *p) |
| 1251 | { |
| 1252 | return p->on_rq == TASK_ON_RQ_MIGRATING; |
| 1253 | } |
| 1254 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1255 | #ifndef prepare_arch_switch |
| 1256 | # define prepare_arch_switch(next) do { } while (0) |
| 1257 | #endif |
Catalin Marinas | 01f23e1 | 2011-11-27 21:43:10 +0000 | [diff] [blame] | 1258 | #ifndef finish_arch_post_lock_switch |
| 1259 | # define finish_arch_post_lock_switch() do { } while (0) |
| 1260 | #endif |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1261 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1262 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
| 1263 | { |
| 1264 | #ifdef CONFIG_SMP |
| 1265 | /* |
| 1266 | * We can optimise this out completely for !SMP, because the |
| 1267 | * SMP rebalancing from interrupt is the only thing that cares |
| 1268 | * here. |
| 1269 | */ |
| 1270 | next->on_cpu = 1; |
| 1271 | #endif |
| 1272 | } |
| 1273 | |
| 1274 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
| 1275 | { |
| 1276 | #ifdef CONFIG_SMP |
| 1277 | /* |
| 1278 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
| 1279 | * We must ensure this doesn't happen until the switch is completely |
| 1280 | * finished. |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1281 | * |
Peter Zijlstra | b75a225 | 2015-10-06 14:36:17 +0200 | [diff] [blame] | 1282 | * In particular, the load of prev->state in finish_task_switch() must |
| 1283 | * happen before this. |
| 1284 | * |
Peter Zijlstra | 1f03e8d | 2016-04-04 10:57:12 +0200 | [diff] [blame] | 1285 | * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1286 | */ |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1287 | smp_store_release(&prev->on_cpu, 0); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1288 | #endif |
| 1289 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 1290 | /* this is a valid case when another task releases the spinlock */ |
| 1291 | rq->lock.owner = current; |
| 1292 | #endif |
| 1293 | /* |
| 1294 | * If we are tracking spinlock dependencies then we have to |
| 1295 | * fix up the runqueue lock - which gets 'carried over' from |
| 1296 | * prev into current: |
| 1297 | */ |
| 1298 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
| 1299 | |
| 1300 | raw_spin_unlock_irq(&rq->lock); |
| 1301 | } |
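/*
 * Sketch of the wakeup side this pairs with: try_to_wake_up() waits,
 * with acquire semantics, for the previous CPU to drop ->on_cpu, so
 * everything done under the old rq->lock is visible before the task
 * can run elsewhere (simplified from kernel/sched/core.c):
 */
#if 0
	/* in try_to_wake_up(), before selecting a new CPU for @p: */
	smp_cond_load_acquire(&p->on_cpu, !VAL);
#endif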
| 1302 | |
Li Zefan | b13095f | 2013-03-05 16:06:38 +0800 | [diff] [blame] | 1303 | /* |
| 1304 | * wake flags |
| 1305 | */ |
| 1306 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ |
| 1307 | #define WF_FORK 0x02 /* child wakeup after fork */ |
| 1308 | #define WF_MIGRATED 0x4 /* internal use, task got migrated */ |
| 1309 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1310 | /* |
| 1311 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
| 1312 | * of tasks with abnormal "nice" values across CPUs, the contribution that |
| 1313 | * each task makes to its run queue's load is weighted according to its |
| 1314 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
| 1315 | * scaled version of the new time slice allocation that they receive on time |
| 1316 | * slice expiry etc. |
| 1317 | */ |
| 1318 | |
| 1319 | #define WEIGHT_IDLEPRIO 3 |
| 1320 | #define WMULT_IDLEPRIO 1431655765 |
| 1321 | |
Andi Kleen | ed82b8a | 2015-11-29 20:59:43 -0800 | [diff] [blame] | 1322 | extern const int sched_prio_to_weight[40]; |
| 1323 | extern const u32 sched_prio_to_wmult[40]; |
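/*
 * Both tables are indexed by (static_prio - MAX_RT_PRIO), mapping nice
 * -20..19 onto 0..39 with weight 1024 at nice 0. A simplified sketch
 * of the lookup (cf. set_load_weight(), which additionally applies
 * scale_load()):
 */
#if 0
	int prio = p->static_prio - MAX_RT_PRIO;

	p->se.load.weight = sched_prio_to_weight[prio];
	p->se.load.inv_weight = sched_prio_to_wmult[prio];
#endif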
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1324 | |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1325 | /* |
| 1326 | * {de,en}queue flags: |
| 1327 | * |
| 1328 | * DEQUEUE_SLEEP - task is no longer runnable |
| 1329 | * ENQUEUE_WAKEUP - task just became runnable |
| 1330 | * |
| 1331 | * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks |
| 1332 | * are in a known state which allows modification. Such pairs |
| 1333 | * should preserve as much state as possible. |
| 1334 | * |
| 1335 | * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location |
| 1336 | * in the runqueue. |
| 1337 | * |
| 1338 | * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) |
| 1339 | * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1340 | * ENQUEUE_MIGRATED - the task was migrated during wakeup |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1341 | * |
| 1342 | */ |
| 1343 | |
| 1344 | #define DEQUEUE_SLEEP 0x01 |
| 1345 | #define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ |
| 1346 | #define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */ |
| 1347 | |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1348 | #define ENQUEUE_WAKEUP 0x01 |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1349 | #define ENQUEUE_RESTORE 0x02 |
| 1350 | #define ENQUEUE_MOVE 0x04 |
| 1351 | |
| 1352 | #define ENQUEUE_HEAD 0x08 |
| 1353 | #define ENQUEUE_REPLENISH 0x10 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1354 | #ifdef CONFIG_SMP |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1355 | #define ENQUEUE_MIGRATED 0x20 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1356 | #else |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1357 | #define ENQUEUE_MIGRATED 0x00 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1358 | #endif |
Juri Lelli | 43aac89 | 2015-06-26 12:14:23 +0100 | [diff] [blame] | 1359 | #define ENQUEUE_WAKEUP_NEW 0x40 |
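/*
 * Typical SAVE/RESTORE usage, as in __sched_setscheduler() and
 * sched_move_task(): dequeue, mutate, re-enqueue in place (sketch):
 */
#if 0
	queued = task_on_rq_queued(p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE);
	/* ... change policy / priority / group here ... */
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_MOVE);
#endif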
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1360 | |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1361 | #define RETRY_TASK ((void *)-1UL) |
| 1362 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1363 | struct sched_class { |
| 1364 | const struct sched_class *next; |
| 1365 | |
| 1366 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1367 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1368 | void (*yield_task) (struct rq *rq); |
| 1369 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); |
| 1370 | |
| 1371 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
| 1372 | |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1373 | /* |
| 1374 | * The pick_next_task() method is responsible for calling put_prev_task() |
| 1375 | * on the @prev task, or doing something equivalent, before returning the |
| 1376 | * next task. |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1377 | * |
| 1378 | * May return RETRY_TASK when it finds a higher prio class has runnable |
| 1379 | * tasks. |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1380 | */ |
| 1381 | struct task_struct * (*pick_next_task) (struct rq *rq, |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1382 | struct task_struct *prev, |
| 1383 | struct pin_cookie cookie); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1384 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
| 1385 | |
| 1386 | #ifdef CONFIG_SMP |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1387 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
xiaofeng.yan | 5a4fd03 | 2015-09-23 14:55:59 +0800 | [diff] [blame] | 1388 | void (*migrate_task_rq)(struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1389 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1390 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
| 1391 | |
| 1392 | void (*set_cpus_allowed)(struct task_struct *p, |
| 1393 | const struct cpumask *newmask); |
| 1394 | |
| 1395 | void (*rq_online)(struct rq *rq); |
| 1396 | void (*rq_offline)(struct rq *rq); |
| 1397 | #endif |
| 1398 | |
| 1399 | void (*set_curr_task) (struct rq *rq); |
| 1400 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
| 1401 | void (*task_fork) (struct task_struct *p); |
Dario Faggioli | e6c390f | 2013-11-07 14:43:35 +0100 | [diff] [blame] | 1402 | void (*task_dead) (struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1403 | |
Kirill Tkhai | 67dfa1b | 2014-10-27 17:40:52 +0300 | [diff] [blame] | 1404 | /* |
| 1405 | * The switched_from() call is allowed to drop rq->lock, therefore we |
| 1406 | * cannot assume the switched_from/switched_to pair is serialized by |
| 1407 | * rq->lock. They are however serialized by p->pi_lock. |
| 1408 | */ |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1409 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); |
| 1410 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
| 1411 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
| 1412 | int oldprio); |
| 1413 | |
| 1414 | unsigned int (*get_rr_interval) (struct rq *rq, |
| 1415 | struct task_struct *task); |
| 1416 | |
Stanislaw Gruszka | 6e99891 | 2014-11-12 16:58:44 +0100 | [diff] [blame] | 1417 | void (*update_curr) (struct rq *rq); |
| 1418 | |
Vincent Guittot | ea86cb4 | 2016-06-17 13:38:55 +0200 | [diff] [blame] | 1419 | #define TASK_SET_GROUP 0 |
| 1420 | #define TASK_MOVE_GROUP 1 |
| 1421 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1422 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Vincent Guittot | ea86cb4 | 2016-06-17 13:38:55 +0200 | [diff] [blame] | 1423 | void (*task_change_group) (struct task_struct *p, int type); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1424 | #endif |
Joonwoo Park | f7d6cd4 | 2017-01-17 15:19:43 -0800 | [diff] [blame] | 1425 | #ifdef CONFIG_SCHED_WALT |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 1426 | void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p, |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1427 | u32 new_task_load, u32 new_pred_demand); |
| 1428 | #endif |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1429 | }; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1430 | |
Peter Zijlstra | 3f1d2a3 | 2014-02-12 10:49:30 +0100 | [diff] [blame] | 1431 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
| 1432 | { |
| 1433 | prev->sched_class->put_prev_task(rq, prev); |
| 1434 | } |
| 1435 | |
Peter Zijlstra | b2bf6c3 | 2016-09-20 22:00:38 +0200 | [diff] [blame] | 1436 | static inline void set_curr_task(struct rq *rq, struct task_struct *curr) |
| 1437 | { |
| 1438 | curr->sched_class->set_curr_task(rq); |
| 1439 | } |
| 1440 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1441 | #define sched_class_highest (&stop_sched_class) |
| 1442 | #define for_each_class(class) \ |
| 1443 | for (class = sched_class_highest; class; class = class->next) |
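/*
 * The highest class with a runnable task wins; a simplified sketch of
 * the core pick loop (cf. pick_next_task() in core.c):
 */
#if 0
	for_each_class(class) {
		p = class->pick_next_task(rq, prev, cookie);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again; /* a higher class became runnable */
			return p;
		}
	}
#endif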
| 1444 | |
| 1445 | extern const struct sched_class stop_sched_class; |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1446 | extern const struct sched_class dl_sched_class; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1447 | extern const struct sched_class rt_sched_class; |
| 1448 | extern const struct sched_class fair_sched_class; |
| 1449 | extern const struct sched_class idle_sched_class; |
| 1450 | |
| 1451 | |
| 1452 | #ifdef CONFIG_SMP |
| 1453 | |
Patrick Bellasi | 2178e84 | 2016-07-22 11:35:59 +0100 | [diff] [blame] | 1454 | extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc); |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1455 | extern void update_group_capacity(struct sched_domain *sd, int cpu); |
Li Zefan | b719203 | 2013-03-07 10:00:26 +0800 | [diff] [blame] | 1456 | |
Daniel Lezcano | 7caff66 | 2014-01-06 12:34:38 +0100 | [diff] [blame] | 1457 | extern void trigger_load_balance(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1458 | |
Peter Zijlstra | c5b2803 | 2015-05-15 17:43:35 +0200 | [diff] [blame] | 1459 | extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); |
| 1460 | |
Joonwoo Park | 0b6cfb3 | 2017-08-09 11:57:12 -0700 | [diff] [blame] | 1461 | bool __cpu_overutilized(int cpu, unsigned long util); |
Joonwoo Park | dc3420d | 2017-01-31 11:14:43 -0800 | [diff] [blame] | 1462 | bool cpu_overutilized(int cpu); |
| 1463 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1464 | #endif |
| 1465 | |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1466 | #ifdef CONFIG_CPU_IDLE |
| 1467 | static inline void idle_set_state(struct rq *rq, |
| 1468 | struct cpuidle_state *idle_state) |
| 1469 | { |
| 1470 | rq->idle_state = idle_state; |
| 1471 | } |
| 1472 | |
| 1473 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1474 | { |
Peter Zijlstra | 9148a3a | 2016-09-20 22:34:51 +0200 | [diff] [blame] | 1475 | SCHED_WARN_ON(!rcu_read_lock_held()); |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1476 | return rq->idle_state; |
| 1477 | } |
Morten Rasmussen | 0691064 | 2015-01-27 13:48:07 +0000 | [diff] [blame] | 1478 | |
| 1479 | static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx) |
| 1480 | { |
| 1481 | rq->idle_state_idx = idle_state_idx; |
| 1482 | } |
| 1483 | |
| 1484 | static inline int idle_get_state_idx(struct rq *rq) |
| 1485 | { |
| 1486 | WARN_ON(!rcu_read_lock_held()); |
Pavankumar Kondeti | c3dae85 | 2017-06-19 15:30:11 +0530 | [diff] [blame] | 1487 | |
| 1488 | if (rq->nr_running || cpu_of(rq) == raw_smp_processor_id()) |
| 1489 | return -1; |
| 1490 | |
Morten Rasmussen | 0691064 | 2015-01-27 13:48:07 +0000 | [diff] [blame] | 1491 | return rq->idle_state_idx; |
| 1492 | } |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1493 | #else |
| 1494 | static inline void idle_set_state(struct rq *rq, |
| 1495 | struct cpuidle_state *idle_state) |
| 1496 | { |
| 1497 | } |
| 1498 | |
| 1499 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1500 | { |
| 1501 | return NULL; |
| 1502 | } |
Morten Rasmussen | 0691064 | 2015-01-27 13:48:07 +0000 | [diff] [blame] | 1503 | |
| 1504 | static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx) |
| 1505 | { |
| 1506 | } |
| 1507 | |
| 1508 | static inline int idle_get_state_idx(struct rq *rq) |
| 1509 | { |
| 1510 | return -1; |
| 1511 | } |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1512 | #endif |
| 1513 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1514 | extern void sysrq_sched_debug_show(void); |
| 1515 | extern void sched_init_granularity(void); |
| 1516 | extern void update_max_interval(void); |
Juri Lelli | 1baca4c | 2013-11-07 14:43:38 +0100 | [diff] [blame] | 1517 | |
| 1518 | extern void init_sched_dl_class(void); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1519 | extern void init_sched_rt_class(void); |
| 1520 | extern void init_sched_fair_class(void); |
| 1521 | |
Kirill Tkhai | 8875125 | 2014-06-29 00:03:57 +0400 | [diff] [blame] | 1522 | extern void resched_curr(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1523 | extern void resched_cpu(int cpu); |
| 1524 | |
| 1525 | extern struct rt_bandwidth def_rt_bandwidth; |
| 1526 | extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); |
| 1527 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1528 | extern struct dl_bandwidth def_dl_bandwidth; |
| 1529 | extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1530 | extern void init_dl_task_timer(struct sched_dl_entity *dl_se); |
| 1531 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1532 | unsigned long to_ratio(u64 period, u64 runtime); |
| 1533 | |
Yuyang Du | 540247f | 2015-07-15 08:04:39 +0800 | [diff] [blame] | 1534 | extern void init_entity_runnable_average(struct sched_entity *se); |
Yuyang Du | 2b8c41d | 2016-03-30 04:30:56 +0800 | [diff] [blame] | 1535 | extern void post_init_entity_util_avg(struct sched_entity *se); |
Alex Shi | a75cdaa | 2013-06-20 10:18:47 +0800 | [diff] [blame] | 1536 | |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1537 | #ifdef CONFIG_NO_HZ_FULL |
| 1538 | extern bool sched_can_stop_tick(struct rq *rq); |
| 1539 | |
| 1540 | /* |
| 1541 | * Tick may be needed by tasks in the runqueue depending on their policy and |
| 1542 | * requirements. If the tick is needed, send the target CPU an IPI to kick |
| 1543 | * it out of nohz mode if necessary. |
| 1544 | */ |
| 1545 | static inline void sched_update_tick_dependency(struct rq *rq) |
| 1546 | { |
| 1547 | int cpu; |
| 1548 | |
| 1549 | if (!tick_nohz_full_enabled()) |
| 1550 | return; |
| 1551 | |
| 1552 | cpu = cpu_of(rq); |
| 1553 | |
| 1554 | if (!tick_nohz_full_cpu(cpu)) |
| 1555 | return; |
| 1556 | |
| 1557 | if (sched_can_stop_tick(rq)) |
| 1558 | tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); |
| 1559 | else |
| 1560 | tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); |
| 1561 | } |
| 1562 | #else |
| 1563 | static inline void sched_update_tick_dependency(struct rq *rq) { } |
| 1564 | #endif |
| 1565 | |
Joseph Lo | 7750186 | 2013-04-22 14:39:18 +0800 | [diff] [blame] | 1566 | static inline void __add_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1567 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1568 | unsigned prev_nr = rq->nr_running; |
| 1569 | |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1570 | sched_update_nr_prod(cpu_of(rq), count, true); |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1571 | rq->nr_running = prev_nr + count; |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1572 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1573 | if (prev_nr < 2 && rq->nr_running >= 2) { |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1574 | #ifdef CONFIG_SMP |
| 1575 | if (!rq->rd->overload) |
| 1576 | rq->rd->overload = true; |
| 1577 | #endif |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1578 | } |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1579 | |
| 1580 | sched_update_tick_dependency(rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1581 | } |
| 1582 | |
Joseph Lo | 7750186 | 2013-04-22 14:39:18 +0800 | [diff] [blame] | 1583 | static inline void __sub_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1584 | { |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 1585 | sched_update_nr_prod(cpu_of(rq), count, false); |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1586 | rq->nr_running -= count; |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1587 | /* Check if we still need preemption */ |
| 1588 | sched_update_tick_dependency(rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1589 | } |
| 1590 | |
Joseph Lo | 7750186 | 2013-04-22 14:39:18 +0800 | [diff] [blame] | 1591 | #ifdef CONFIG_CPU_QUIET |
| 1592 | #define NR_AVE_SCALE(x) ((x) << FSHIFT) |
| 1593 | static inline u64 do_nr_running_integral(struct rq *rq) |
| 1594 | { |
| 1595 | s64 nr, deltax; |
| 1596 | u64 nr_running_integral = rq->nr_running_integral; |
| 1597 | |
| 1598 | deltax = rq->clock_task - rq->nr_last_stamp; |
| 1599 | nr = NR_AVE_SCALE(rq->nr_running); |
| 1600 | |
| 1601 | nr_running_integral += nr * deltax; |
| 1602 | |
| 1603 | return nr_running_integral; |
| 1604 | } |
| 1605 | |
| 1606 | static inline void add_nr_running(struct rq *rq, unsigned count) |
| 1607 | { |
| 1608 | write_seqcount_begin(&rq->ave_seqcnt); |
| 1609 | rq->nr_running_integral = do_nr_running_integral(rq); |
| 1610 | rq->nr_last_stamp = rq->clock_task; |
| 1611 | __add_nr_running(rq, count); |
| 1612 | write_seqcount_end(&rq->ave_seqcnt); |
| 1613 | } |
| 1614 | |
| 1615 | static inline void sub_nr_running(struct rq *rq, unsigned count) |
| 1616 | { |
| 1617 | write_seqcount_begin(&rq->ave_seqcnt); |
| 1618 | rq->nr_running_integral = do_nr_running_integral(rq); |
| 1619 | rq->nr_last_stamp = rq->clock_task; |
| 1620 | __sub_nr_running(rq, count); |
| 1621 | write_seqcount_end(&rq->ave_seqcnt); |
| 1622 | } |
| 1623 | #else |
| 1624 | #define add_nr_running __add_nr_running |
| 1625 | #define sub_nr_running __sub_nr_running |
| 1626 | #endif |
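/*
 * The integral above is the fixed-point area under the nr_running
 * curve, so a consumer can compute the average number of runnable
 * tasks over a sampling window as
 *
 *	avg = (integral_now - integral_then) / (clock_now - clock_then)
 *
 * still scaled by FSHIFT. E.g. two tasks runnable for a whole window
 * yield avg == NR_AVE_SCALE(2).
 */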
| 1627 | |
Frederic Weisbecker | 265f22a | 2013-05-03 03:39:05 +0200 | [diff] [blame] | 1628 | static inline void rq_last_tick_reset(struct rq *rq) |
| 1629 | { |
| 1630 | #ifdef CONFIG_NO_HZ_FULL |
| 1631 | rq->last_sched_tick = jiffies; |
| 1632 | #endif |
| 1633 | } |
| 1634 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1635 | extern void update_rq_clock(struct rq *rq); |
| 1636 | |
| 1637 | extern void activate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1638 | extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1639 | |
| 1640 | extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); |
| 1641 | |
| 1642 | extern const_debug unsigned int sysctl_sched_time_avg; |
| 1643 | extern const_debug unsigned int sysctl_sched_nr_migrate; |
| 1644 | extern const_debug unsigned int sysctl_sched_migration_cost; |
| 1645 | |
| 1646 | static inline u64 sched_avg_period(void) |
| 1647 | { |
| 1648 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1649 | } |
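/*
 * With the default sysctl_sched_time_avg of 1000 (ms) this is 500ms:
 * sched_avg_update() below halves rq->rt_avg once per such period.
 */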
| 1650 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1651 | #ifdef CONFIG_SCHED_HRTICK |
| 1652 | |
| 1653 | /* |
| 1654 | * Use hrtick when: |
| 1655 | * - enabled by features |
| 1656 | * - hrtimer is actually high res |
| 1657 | */ |
| 1658 | static inline int hrtick_enabled(struct rq *rq) |
| 1659 | { |
| 1660 | if (!sched_feat(HRTICK)) |
| 1661 | return 0; |
| 1662 | if (!cpu_active(cpu_of(rq))) |
| 1663 | return 0; |
| 1664 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1665 | } |
| 1666 | |
| 1667 | void hrtick_start(struct rq *rq, u64 delay); |
| 1668 | |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 1669 | #else |
| 1670 | |
| 1671 | static inline int hrtick_enabled(struct rq *rq) |
| 1672 | { |
| 1673 | return 0; |
| 1674 | } |
| 1675 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1676 | #endif /* CONFIG_SCHED_HRTICK */ |
| 1677 | |
| 1678 | #ifdef CONFIG_SMP |
| 1679 | extern void sched_avg_update(struct rq *rq); |
Peter Zijlstra | dfbca41 | 2015-03-23 14:19:05 +0100 | [diff] [blame] | 1680 | |
| 1681 | #ifndef arch_scale_freq_capacity |
| 1682 | static __always_inline |
| 1683 | unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu) |
| 1684 | { |
| 1685 | return SCHED_CAPACITY_SCALE; |
| 1686 | } |
| 1687 | #endif |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1688 | |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1689 | #ifndef arch_scale_cpu_capacity |
| 1690 | static __always_inline |
| 1691 | unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) |
| 1692 | { |
Dietmar Eggemann | e3279a2 | 2015-08-15 00:04:41 +0100 | [diff] [blame] | 1693 | if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1)) |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1694 | return sd->smt_gain / sd->span_weight; |
| 1695 | |
| 1696 | return SCHED_CAPACITY_SCALE; |
| 1697 | } |
| 1698 | #endif |
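/*
 * Worked example for the default above: on a 2-way SMT domain with the
 * default smt_gain of 1178, each hardware thread reports 1178 / 2 =
 * 589, i.e. two siblings together are worth ~15% more than one full
 * CPU (1178 vs SCHED_CAPACITY_SCALE == 1024).
 */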
| 1699 | |
Joonwoo Park | 28c5121 | 2017-06-09 14:06:54 -0700 | [diff] [blame] | 1700 | #ifndef arch_update_cpu_capacity |
| 1701 | static __always_inline |
| 1702 | void arch_update_cpu_capacity(int cpu) |
| 1703 | { |
| 1704 | } |
| 1705 | #endif |
| 1706 | |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1707 | #ifdef CONFIG_SMP |
| 1708 | static inline unsigned long capacity_of(int cpu) |
| 1709 | { |
| 1710 | return cpu_rq(cpu)->cpu_capacity; |
| 1711 | } |
| 1712 | |
| 1713 | static inline unsigned long capacity_orig_of(int cpu) |
| 1714 | { |
| 1715 | return cpu_rq(cpu)->cpu_capacity_orig; |
| 1716 | } |
| 1717 | |
Srivatsa Vaddagiri | 26c2154 | 2016-05-31 09:08:38 -0700 | [diff] [blame] | 1718 | extern unsigned int walt_disabled; |
| 1719 | |
Pavankumar Kondeti | 3066bc4 | 2017-08-01 15:45:31 +0530 | [diff] [blame] | 1720 | static inline unsigned long task_util(struct task_struct *p) |
| 1721 | { |
| 1722 | #ifdef CONFIG_SCHED_WALT |
| 1723 | if (!walt_disabled && sysctl_sched_use_walt_task_util) |
| 1724 | return p->ravg.demand / |
| 1725 | (sched_ravg_window >> SCHED_CAPACITY_SHIFT); |
| 1726 | #endif |
| 1727 | return p->se.avg.util_avg; |
| 1728 | } |
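/*
 * Worked example for the WALT branch, assuming the common 20ms ravg
 * window: sched_ravg_window >> SCHED_CAPACITY_SHIFT is 20000000/1024
 * ~= 19531, so a task with 10ms of demand reports 10000000/19531 ~=
 * 512, i.e. half of SCHED_CAPACITY_SCALE, matching the PELT range.
 */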
| 1729 | |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1730 | /* |
| 1731 | * cpu_util returns the amount of capacity of a CPU that is used by CFS |
| 1732 | * tasks. The unit of the return value must be the one of capacity so we can |
| 1733 | * compare the utilization with the capacity of the CPU that is available for |
| 1734 | * CFS tasks (i.e. cpu_capacity). |
| 1735 | * |
| 1736 | * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the |
| 1737 | * recent utilization of currently non-runnable tasks on a CPU. It represents |
| 1738 | * the amount of utilization of a CPU in the range [0..capacity_orig] where |
| 1739 | * capacity_orig is the cpu_capacity available at the highest frequency |
| 1740 | * (arch_scale_freq_capacity()). |
| 1741 | * The utilization of a CPU converges towards a sum equal to or less than the |
| 1742 | * current capacity (capacity_curr <= capacity_orig) of the CPU because it is |
| 1743 | * the running time on this CPU scaled by capacity_curr. |
| 1744 | * |
| 1745 | * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even |
| 1746 | * higher than capacity_orig because of unfortunate rounding in |
| 1747 | * cfs.avg.util_avg or just after migrating tasks and new task wakeups until |
| 1748 | * the average stabilizes with the new running time. We need to check that the |
| 1749 | * utilization stays within the range of [0..capacity_orig] and cap it if |
| 1750 | * necessary. Without utilization capping, a group could be seen as overloaded |
| 1751 | * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of |
| 1752 | * available capacity. We allow utilization to overshoot capacity_curr (but not |
| 1753 | * capacity_orig) as it useful for predicting the capacity required after task |
| 1754 | * capacity_orig) as it is useful for predicting the capacity required |
| 1755 | * after task migrations (scheduler-driven DVFS). |
| 1756 | static inline unsigned long __cpu_util(int cpu, int delta) |
| 1757 | { |
Joonwoo Park | 93a51bf | 2017-01-20 11:10:15 -0800 | [diff] [blame] | 1758 | u64 util = cpu_rq(cpu)->cfs.avg.util_avg; |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1759 | unsigned long capacity = capacity_orig_of(cpu); |
| 1760 | |
Srivatsa Vaddagiri | 26c2154 | 2016-05-31 09:08:38 -0700 | [diff] [blame] | 1761 | #ifdef CONFIG_SCHED_WALT |
Amit Pundir | 102f7f4 | 2016-08-24 11:52:17 +0530 | [diff] [blame] | 1762 | if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 1763 | util = cpu_rq(cpu)->walt_stats.cumulative_runnable_avg; |
Joonwoo Park | 93a51bf | 2017-01-20 11:10:15 -0800 | [diff] [blame] | 1764 | util = div64_u64(util, |
| 1765 | sched_ravg_window >> SCHED_CAPACITY_SHIFT); |
Amit Pundir | 102f7f4 | 2016-08-24 11:52:17 +0530 | [diff] [blame] | 1766 | } |
Srivatsa Vaddagiri | 26c2154 | 2016-05-31 09:08:38 -0700 | [diff] [blame] | 1767 | #endif |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1768 | delta += util; |
| 1769 | if (delta < 0) |
| 1770 | return 0; |
| 1771 | |
| 1772 | return (delta >= capacity) ? capacity : delta; |
| 1773 | } |
| 1774 | |
| 1775 | static inline unsigned long cpu_util(int cpu) |
| 1776 | { |
| 1777 | return __cpu_util(cpu, 0); |
| 1778 | } |
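/*
 * The delta argument lets callers ask "what if" questions; a common
 * wake-up path pattern is estimating a CPU's utilization after placing
 * a task there (sketch):
 */
#if 0
	/* would @p's utilization still fit on top of @cpu's own? */
	if (__cpu_util(cpu, task_util(p)) <= capacity_orig_of(cpu))
		/* @cpu is a candidate */;
#endif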
| 1779 | |
Joonwoo Park | 19c0075 | 2017-04-27 14:37:35 -0700 | [diff] [blame] | 1780 | struct sched_walt_cpu_load { |
| 1781 | unsigned long prev_window_util; |
| 1782 | unsigned long nl; |
| 1783 | unsigned long pl; |
Vikram Mulukutla | a65aafe | 2017-06-05 13:37:45 -0700 | [diff] [blame] | 1784 | u64 ws; |
Joonwoo Park | 19c0075 | 2017-04-27 14:37:35 -0700 | [diff] [blame] | 1785 | }; |
| 1786 | |
Joonwoo Park | e19cd6f | 2017-02-03 14:32:49 -0800 | [diff] [blame] | 1787 | static inline unsigned long cpu_util_cum(int cpu, int delta) |
| 1788 | { |
| 1789 | u64 util = cpu_rq(cpu)->cfs.avg.util_avg; |
| 1790 | unsigned long capacity = capacity_orig_of(cpu); |
| 1791 | |
| 1792 | #ifdef CONFIG_SCHED_WALT |
| 1793 | if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { |
| 1794 | util = cpu_rq(cpu)->cum_window_demand; |
| 1795 | util = div64_u64(util, |
| 1796 | sched_ravg_window >> SCHED_CAPACITY_SHIFT); |
| 1797 | } |
| 1798 | #endif |
| 1799 | delta += util; |
| 1800 | if (delta < 0) |
| 1801 | return 0; |
| 1802 | |
| 1803 | return (delta >= capacity) ? capacity : delta; |
| 1804 | } |
| 1805 | |
Syed Rameez Mustafa | 20acfe7 | 2017-01-30 09:35:46 +0530 | [diff] [blame] | 1806 | #ifdef CONFIG_SCHED_WALT |
| 1807 | u64 freq_policy_load(struct rq *rq); |
| 1808 | #endif |
| 1809 | |
Joonwoo Park | 19c0075 | 2017-04-27 14:37:35 -0700 | [diff] [blame] | 1810 | static inline unsigned long |
Joonwoo Park | 858d575 | 2017-08-21 12:09:49 -0700 | [diff] [blame] | 1811 | cpu_util_freq_pelt(int cpu) |
Joonwoo Park | 19c0075 | 2017-04-27 14:37:35 -0700 | [diff] [blame] | 1812 | { |
Syed Rameez Mustafa | 20acfe7 | 2017-01-30 09:35:46 +0530 | [diff] [blame] | 1813 | struct rq *rq = cpu_rq(cpu); |
Maria Yu | 4837b1a | 2017-09-22 16:02:01 +0800 | [diff] [blame] | 1814 | u64 util = rq->cfs.avg.util_avg; |
Joonwoo Park | 4266ccd | 2016-12-08 16:12:12 -0800 | [diff] [blame] | 1815 | unsigned long capacity = capacity_orig_of(cpu); |
Joonwoo Park | 19c0075 | 2017-04-27 14:37:35 -0700 | [diff] [blame] | 1816 | |
Joonwoo Park | 858d575 | 2017-08-21 12:09:49 -0700 | [diff] [blame] | 1817 | util *= (100 + per_cpu(sched_load_boost, cpu)); |
| 1818 | do_div(util, 100); |
Syed Rameez Mustafa | 20acfe7 | 2017-01-30 09:35:46 +0530 | [diff] [blame] | 1819 | |
Joonwoo Park | 4266ccd | 2016-12-08 16:12:12 -0800 | [diff] [blame] | 1820 | return (util >= capacity) ? capacity : util; |
Joonwoo Park | 19c0075 | 2017-04-27 14:37:35 -0700 | [diff] [blame] | 1821 | } |
Joonwoo Park | 858d575 | 2017-08-21 12:09:49 -0700 | [diff] [blame] | 1822 | |
| 1823 | #ifdef CONFIG_SCHED_WALT |
| 1824 | static inline unsigned long |
| 1825 | cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load) |
| 1826 | { |
| 1827 | u64 util, util_unboosted; |
| 1828 | struct rq *rq = cpu_rq(cpu); |
| 1829 | unsigned long capacity = capacity_orig_of(cpu); |
| 1830 | int boost; |
| 1831 | |
| 1832 | if (walt_disabled || !sysctl_sched_use_walt_cpu_util) |
| 1833 | return cpu_util_freq_pelt(cpu); |
| 1834 | |
| 1835 | boost = per_cpu(sched_load_boost, cpu); |
| 1836 | util_unboosted = util = freq_policy_load(rq); |
| 1837 | util = div64_u64(util * (100 + boost), |
| 1838 | walt_cpu_util_freq_divisor); |
| 1839 | |
| 1840 | if (walt_load) { |
| 1841 | u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum + |
| 1842 | rq->grp_time.nt_prev_runnable_sum; |
| 1843 | u64 pl = rq->walt_stats.pred_demands_sum; |
| 1844 | |
| 1845 | /* do_pl_notif() needs unboosted signals */ |
| 1846 | rq->old_busy_time = div64_u64(util_unboosted, |
| 1847 | sched_ravg_window >> |
| 1848 | SCHED_CAPACITY_SHIFT); |
| 1849 | rq->old_estimated_time = div64_u64(pl, sched_ravg_window >> |
| 1850 | SCHED_CAPACITY_SHIFT); |
| 1851 | |
| 1852 | nl = div64_u64(nl * (100 + boost), |
| 1853 | walt_cpu_util_freq_divisor); |
| 1854 | pl = div64_u64(pl * (100 + boost), |
| 1855 | walt_cpu_util_freq_divisor); |
| 1856 | |
| 1857 | walt_load->prev_window_util = util; |
| 1858 | walt_load->nl = nl; |
| 1859 | walt_load->pl = pl; |
| 1860 | walt_load->ws = rq->window_start; |
| 1861 | } |
| 1862 | |
| 1863 | return (util >= capacity) ? capacity : util; |
| 1864 | } |
| 1865 | |
| 1866 | static inline unsigned long |
| 1867 | cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load) |
| 1868 | { |
| 1869 | return cpu_util_freq_walt(cpu, walt_load); |
| 1870 | } |
| 1871 | |
| 1872 | #else |
| 1873 | |
| 1874 | static inline unsigned long |
| 1875 | cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load) |
| 1876 | { |
| 1877 | return cpu_util_freq_pelt(cpu); |
| 1878 | } |
| 1879 | |
| 1880 | #endif /* CONFIG_SCHED_WALT */ |
| 1881 | |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1882 | #endif |
| 1883 | |
Joonwoo Park | 2ae888b | 2017-01-12 17:46:00 -0800 | [diff] [blame] | 1884 | extern unsigned int capacity_margin_freq; |
| 1885 | |
Joonwoo Park | 858d575 | 2017-08-21 12:09:49 -0700 | [diff] [blame] | 1886 | static inline unsigned long |
| 1887 | add_capacity_margin(unsigned long cpu_capacity, int cpu) |
Joonwoo Park | 2ae888b | 2017-01-12 17:46:00 -0800 | [diff] [blame] | 1888 | { |
Joonwoo Park | 858d575 | 2017-08-21 12:09:49 -0700 | [diff] [blame] | 1889 | cpu_capacity = cpu_capacity * capacity_margin_freq * |
| 1890 | (100 + per_cpu(sched_load_boost, cpu)); |
| 1891 | cpu_capacity /= 100; |
Joonwoo Park | 2ae888b | 2017-01-12 17:46:00 -0800 | [diff] [blame] | 1892 | cpu_capacity /= SCHED_CAPACITY_SCALE; |
| 1893 | return cpu_capacity; |
| 1894 | } |
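/*
 * Worked example, assuming the common capacity_margin_freq of 1280 and
 * no load boost: a CPU capacity of 800 becomes
 * 800 * 1280 * 100 / 100 / 1024 = 1000, i.e. frequency selection
 * targets 1280/1024 = 25% headroom above the raw utilization.
 */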
| 1895 | |
Michael Turquette | 3b6188e | 2015-06-30 12:45:48 +0100 | [diff] [blame] | 1896 | #ifdef CONFIG_CPU_FREQ_GOV_SCHED |
Steve Muckle | 608d494 | 2015-06-25 14:12:33 +0100 | [diff] [blame] | 1897 | #define capacity_max SCHED_CAPACITY_SCALE |
Michael Turquette | 3b6188e | 2015-06-30 12:45:48 +0100 | [diff] [blame] | 1898 | extern struct static_key __sched_freq; |
| 1899 | |
| 1900 | static inline bool sched_freq(void) |
| 1901 | { |
| 1902 | return static_key_false(&__sched_freq); |
| 1903 | } |
| 1904 | |
| 1905 | DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs); |
| 1906 | void update_cpu_capacity_request(int cpu, bool request); |
| 1907 | |
| 1908 | static inline void set_cfs_cpu_capacity(int cpu, bool request, |
| 1909 | unsigned long capacity) |
| 1910 | { |
Patrick Bellasi | c4eef1f | 2016-06-30 15:00:41 +0100 | [diff] [blame] | 1911 | struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu); |
| 1912 | |
| 1913 | #ifdef CONFIG_SCHED_WALT |
| 1914 | if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { |
| 1915 | int rtdl = scr->rt + scr->dl; |
| 1916 | /* |
| 1917 | * WALT tracks the utilization of a CPU considering the load |
| 1918 | * generated by all the scheduling classes. |
| 1919 | * Since the following call to: |
| 1920 | * update_cpu_capacity |
| 1921 | * is already adding the RT and DL utilizations, let's remove |
| 1922 | * these contributions from the WALT signal. |
| 1923 | */ |
| 1924 | if (capacity > rtdl) |
| 1925 | capacity -= rtdl; |
| 1926 | else |
| 1927 | capacity = 0; |
| 1928 | } |
| 1929 | #endif |
| 1930 | if (scr->cfs != capacity) { |
| 1931 | scr->cfs = capacity; |
Michael Turquette | 3b6188e | 2015-06-30 12:45:48 +0100 | [diff] [blame] | 1932 | update_cpu_capacity_request(cpu, request); |
| 1933 | } |
| 1934 | } |
| 1935 | |
| 1936 | static inline void set_rt_cpu_capacity(int cpu, bool request, |
| 1937 | unsigned long capacity) |
| 1938 | { |
| 1939 | if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) { |
| 1940 | per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity; |
| 1941 | update_cpu_capacity_request(cpu, request); |
| 1942 | } |
| 1943 | } |
| 1944 | |
| 1945 | static inline void set_dl_cpu_capacity(int cpu, bool request, |
| 1946 | unsigned long capacity) |
| 1947 | { |
| 1948 | if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) { |
| 1949 | per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity; |
| 1950 | update_cpu_capacity_request(cpu, request); |
| 1951 | } |
| 1952 | } |
| 1953 | #else |
| 1954 | static inline bool sched_freq(void) { return false; } |
| 1955 | static inline void set_cfs_cpu_capacity(int cpu, bool request, |
| 1956 | unsigned long capacity) |
| 1957 | { } |
| 1958 | static inline void set_rt_cpu_capacity(int cpu, bool request, |
| 1959 | unsigned long capacity) |
| 1960 | { } |
| 1961 | static inline void set_dl_cpu_capacity(int cpu, bool request, |
| 1962 | unsigned long capacity) |
| 1963 | { } |
| 1964 | #endif |
| 1965 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1966 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1967 | { |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1968 | rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1969 | } |
| 1970 | #else |
| 1971 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } |
| 1972 | static inline void sched_avg_update(struct rq *rq) { } |
| 1973 | #endif |
| 1974 | |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1975 | struct rq_flags { |
| 1976 | unsigned long flags; |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1977 | struct pin_cookie cookie; |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1978 | }; |
| 1979 | |
| 1980 | struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3e71a46 | 2016-04-28 16:16:33 +0200 | [diff] [blame] | 1981 | __acquires(rq->lock); |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1982 | struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1983 | __acquires(p->pi_lock) |
Peter Zijlstra | 3e71a46 | 2016-04-28 16:16:33 +0200 | [diff] [blame] | 1984 | __acquires(rq->lock); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1985 | |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1986 | static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1987 | __releases(rq->lock) |
| 1988 | { |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1989 | lockdep_unpin_lock(&rq->lock, rf->cookie); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1990 | raw_spin_unlock(&rq->lock); |
| 1991 | } |
| 1992 | |
| 1993 | static inline void |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1994 | task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1995 | __releases(rq->lock) |
| 1996 | __releases(p->pi_lock) |
| 1997 | { |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1998 | lockdep_unpin_lock(&rq->lock, rf->cookie); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1999 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 2000 | raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 2001 | } |
| 2002 | |
Patrick Bellasi | d248900 | 2016-07-28 18:44:40 +0100 | [diff] [blame] | 2003 | extern struct rq *lock_rq_of(struct task_struct *p, struct rq_flags *flags); |
| 2004 | extern void unlock_rq_of(struct rq *rq, struct task_struct *p, struct rq_flags *flags); |
| 2005 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2006 | #ifdef CONFIG_SMP |
| 2007 | #ifdef CONFIG_PREEMPT |
| 2008 | |
| 2009 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 2010 | |
| 2011 | /* |
| 2012 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 2013 | * way at the expense of forcing extra atomic operations in all |
| 2014 | * invocations. This assures that the double_lock is acquired using the |
| 2015 | * same underlying policy as the spinlock_t on this architecture, which |
| 2016 | * reduces latency compared to the unfair variant below. However, it |
| 2017 | * also adds more overhead and therefore may reduce throughput. |
| 2018 | */ |
| 2019 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 2020 | __releases(this_rq->lock) |
| 2021 | __acquires(busiest->lock) |
| 2022 | __acquires(this_rq->lock) |
| 2023 | { |
| 2024 | raw_spin_unlock(&this_rq->lock); |
| 2025 | double_rq_lock(this_rq, busiest); |
| 2026 | |
| 2027 | return 1; |
| 2028 | } |
| 2029 | |
| 2030 | #else |
| 2031 | /* |
| 2032 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 2033 | * latency by eliminating extra atomic operations when the locks are |
| 2034 | * already in proper order on entry. This favors lower cpu-ids and will |
| 2035 | * grant the double lock to lower cpus over higher ids under contention, |
| 2036 | * regardless of entry order into the function. |
| 2037 | */ |
| 2038 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 2039 | __releases(this_rq->lock) |
| 2040 | __acquires(busiest->lock) |
| 2041 | __acquires(this_rq->lock) |
| 2042 | { |
| 2043 | int ret = 0; |
| 2044 | |
| 2045 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
| 2046 | if (busiest < this_rq) { |
| 2047 | raw_spin_unlock(&this_rq->lock); |
| 2048 | raw_spin_lock(&busiest->lock); |
| 2049 | raw_spin_lock_nested(&this_rq->lock, |
| 2050 | SINGLE_DEPTH_NESTING); |
| 2051 | ret = 1; |
| 2052 | } else |
| 2053 | raw_spin_lock_nested(&busiest->lock, |
| 2054 | SINGLE_DEPTH_NESTING); |
| 2055 | } |
| 2056 | return ret; |
| 2057 | } |
| 2058 | |
| 2059 | #endif /* CONFIG_PREEMPT */ |
| 2060 | |
| 2061 | /* |
| 2062 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 2063 | */ |
| 2064 | static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 2065 | { |
| 2066 | if (unlikely(!irqs_disabled())) { |
| 2067 | /* printk() doesn't work well under rq->lock */ |
| 2068 | raw_spin_unlock(&this_rq->lock); |
| 2069 | BUG_ON(1); |
| 2070 | } |
| 2071 | |
| 2072 | return _double_lock_balance(this_rq, busiest); |
| 2073 | } |
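/*
 * Note the return value: 1 means this_rq->lock was dropped and
 * re-acquired, so anything derived under it must be revalidated. A
 * sketch of the canonical pattern (cf. find_lock_lowest_rq()):
 */
#if 0
	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq was unlocked: recheck what we derived earlier */
		if (task_rq(p) != busiest || !task_on_rq_queued(p)) {
			double_unlock_balance(this_rq, busiest);
			return NULL;
		}
	}
	/* both rq locks held here */
#endif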
| 2074 | |
| 2075 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 2076 | __releases(busiest->lock) |
| 2077 | { |
Todd Kjos | a31778a | 2016-07-04 15:04:45 +0100 | [diff] [blame] | 2078 | if (this_rq != busiest) |
| 2079 | raw_spin_unlock(&busiest->lock); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2080 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 2081 | } |
| 2082 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 2083 | static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
| 2084 | { |
| 2085 | if (l1 > l2) |
| 2086 | swap(l1, l2); |
| 2087 | |
| 2088 | spin_lock(l1); |
| 2089 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 2090 | } |
| 2091 | |
Mike Galbraith | 60e69ee | 2014-04-07 10:55:15 +0200 | [diff] [blame] | 2092 | static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) |
| 2093 | { |
| 2094 | if (l1 > l2) |
| 2095 | swap(l1, l2); |
| 2096 | |
| 2097 | spin_lock_irq(l1); |
| 2098 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 2099 | } |
| 2100 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 2101 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
| 2102 | { |
| 2103 | if (l1 > l2) |
| 2104 | swap(l1, l2); |
| 2105 | |
| 2106 | raw_spin_lock(l1); |
| 2107 | raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 2108 | } |
| 2109 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2110 | /* |
| 2111 | * double_rq_lock - safely lock two runqueues |
| 2112 | * |
| 2113 | * Note this does not disable interrupts like task_rq_lock, |
| 2114 | * you need to do so manually before calling. |
| 2115 | */ |
| 2116 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 2117 | __acquires(rq1->lock) |
| 2118 | __acquires(rq2->lock) |
| 2119 | { |
| 2120 | BUG_ON(!irqs_disabled()); |
| 2121 | if (rq1 == rq2) { |
| 2122 | raw_spin_lock(&rq1->lock); |
| 2123 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 2124 | } else { |
| 2125 | if (rq1 < rq2) { |
| 2126 | raw_spin_lock(&rq1->lock); |
| 2127 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 2128 | } else { |
| 2129 | raw_spin_lock(&rq2->lock); |
| 2130 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 2131 | } |
| 2132 | } |
| 2133 | } |
| 2134 | |
| 2135 | /* |
| 2136 | * double_rq_unlock - safely unlock two runqueues |
| 2137 | * |
| 2138 | * Note this does not restore interrupts like task_rq_unlock, |
| 2139 | * you need to do so manually after calling. |
| 2140 | */ |
| 2141 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 2142 | __releases(rq1->lock) |
| 2143 | __releases(rq2->lock) |
| 2144 | { |
| 2145 | raw_spin_unlock(&rq1->lock); |
| 2146 | if (rq1 != rq2) |
| 2147 | raw_spin_unlock(&rq2->lock); |
| 2148 | else |
| 2149 | __release(rq2->lock); |
| 2150 | } |
| 2151 | |
John Dias | e2c5c98 | 2016-09-15 08:52:27 -0700 | [diff] [blame] | 2152 | /* |
| 2153 | * task_may_not_preempt - check whether a task may not be preemptible soon |
| 2154 | */ |
| 2155 | extern bool task_may_not_preempt(struct task_struct *task, int cpu); |
| 2156 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2157 | #else /* CONFIG_SMP */ |
| 2158 | |
| 2159 | /* |
| 2160 | * double_rq_lock - safely lock two runqueues |
| 2161 | * |
| 2162 | * Note this does not disable interrupts like task_rq_lock, |
| 2163 | * you need to do so manually before calling. |
| 2164 | */ |
| 2165 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 2166 | __acquires(rq1->lock) |
| 2167 | __acquires(rq2->lock) |
| 2168 | { |
| 2169 | BUG_ON(!irqs_disabled()); |
| 2170 | BUG_ON(rq1 != rq2); |
| 2171 | raw_spin_lock(&rq1->lock); |
| 2172 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 2173 | } |
| 2174 | |
| 2175 | /* |
| 2176 | * double_rq_unlock - safely unlock two runqueues |
| 2177 | * |
| 2178 | * Note this does not restore interrupts like task_rq_unlock, |
| 2179 | * you need to do so manually after calling. |
| 2180 | */ |
| 2181 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 2182 | __releases(rq1->lock) |
| 2183 | __releases(rq2->lock) |
| 2184 | { |
| 2185 | BUG_ON(rq1 != rq2); |
| 2186 | raw_spin_unlock(&rq1->lock); |
| 2187 | __release(rq2->lock); |
| 2188 | } |
| 2189 | |
| 2190 | #endif |
| 2191 | |
| 2192 | extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); |
| 2193 | extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 2194 | |
| 2195 | #ifdef CONFIG_SCHED_DEBUG |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2196 | extern void print_cfs_stats(struct seq_file *m, int cpu); |
| 2197 | extern void print_rt_stats(struct seq_file *m, int cpu); |
Wanpeng Li | acb3213 | 2014-10-31 06:39:33 +0800 | [diff] [blame] | 2198 | extern void print_dl_stats(struct seq_file *m, int cpu); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 2199 | extern void |
| 2200 | print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 397f237 | 2015-06-25 22:51:43 +0530 | [diff] [blame] | 2201 | |
| 2202 | #ifdef CONFIG_NUMA_BALANCING |
| 2203 | extern void |
| 2204 | show_numa_stats(struct task_struct *p, struct seq_file *m); |
| 2205 | extern void |
| 2206 | print_numa_stats(struct seq_file *m, int node, unsigned long tsf, |
| 2207 | unsigned long tpf, unsigned long gsf, unsigned long gpf); |
| 2208 | #endif /* CONFIG_NUMA_BALANCING */ |
| 2209 | #endif /* CONFIG_SCHED_DEBUG */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2210 | |
| 2211 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
Abel Vesa | 07c54f7 | 2015-03-03 13:50:27 +0200 | [diff] [blame] | 2212 | extern void init_rt_rq(struct rt_rq *rt_rq); |
| 2213 | extern void init_dl_rq(struct dl_rq *dl_rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2214 | |
Ben Segall | 1ee14e6 | 2013-10-16 11:16:12 -0700 | [diff] [blame] | 2215 | extern void cfs_bandwidth_usage_inc(void); |
| 2216 | extern void cfs_bandwidth_usage_dec(void); |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2217 | |
Frederic Weisbecker | 3451d02 | 2011-08-10 23:21:01 +0200 | [diff] [blame] | 2218 | #ifdef CONFIG_NO_HZ_COMMON |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2219 | enum rq_nohz_flag_bits { |
| 2220 | NOHZ_TICK_STOPPED, |
| 2221 | NOHZ_BALANCE_KICK, |
| 2222 | }; |
| 2223 | |
Syed Rameez Mustafa | dddcab7 | 2016-09-07 16:18:27 -0700 | [diff] [blame] | 2224 | #define NOHZ_KICK_ANY 0 |
| 2225 | #define NOHZ_KICK_RESTRICT 1 |
| 2226 | |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2227 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
Thomas Gleixner | 20a5c8c | 2016-03-10 12:54:20 +0100 | [diff] [blame] | 2228 | |
| 2229 | extern void nohz_balance_exit_idle(unsigned int cpu); |
| 2230 | #else |
| 2231 | static inline void nohz_balance_exit_idle(unsigned int cpu) { } |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 2232 | #endif |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2233 | |
| 2234 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2235 | struct irqtime { |
| 2236 | u64 hardirq_time; |
| 2237 | u64 softirq_time; |
| 2238 | u64 irq_start_time; |
| 2239 | struct u64_stats_sync sync; |
| 2240 | }; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2241 | |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2242 | DECLARE_PER_CPU(struct irqtime, cpu_irqtime); |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2243 | |
| 2244 | static inline u64 irq_time_read(int cpu) |
| 2245 | { |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2246 | struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); |
| 2247 | unsigned int seq; |
| 2248 | u64 total; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2249 | |
| 2250 | do { |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2251 | seq = __u64_stats_fetch_begin(&irqtime->sync); |
| 2252 | total = irqtime->softirq_time + irqtime->hardirq_time; |
| 2253 | } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2254 | |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 2255 | return total; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2256 | } |
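/*
 * Sketch of the matching writer side (the real update lives in the cputime
 * accounting code; the helper name here is illustrative). Writers bump the
 * seqcount around the 64-bit updates so irq_time_read() retries instead of
 * seeing a torn value on 32-bit kernels.
 */
static inline void example_add_hardirq_time(int cpu, u64 delta)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);

	u64_stats_update_begin(&irqtime->sync);
	irqtime->hardirq_time += delta;
	u64_stats_update_end(&irqtime->sync);
}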
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 2257 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2258 | |
Vikram Mulukutla | a65aafe | 2017-06-05 13:37:45 -0700 | [diff] [blame] | 2259 | #ifdef CONFIG_SCHED_WALT |
Vikram Mulukutla | c7b54b8 | 2017-07-12 11:34:54 -0700 | [diff] [blame] | 2260 | void note_task_waking(struct task_struct *p, u64 wallclock); |
Vikram Mulukutla | a65aafe | 2017-06-05 13:37:45 -0700 | [diff] [blame] | 2261 | #else /* CONFIG_SCHED_WALT */ |
Vikram Mulukutla | c7b54b8 | 2017-07-12 11:34:54 -0700 | [diff] [blame] | 2262 | static inline void note_task_waking(struct task_struct *p, u64 wallclock) { } |
Vikram Mulukutla | a65aafe | 2017-06-05 13:37:45 -0700 | [diff] [blame] | 2263 | #endif /* CONFIG_SCHED_WALT */ |
| 2264 | |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2265 | #ifdef CONFIG_CPU_FREQ |
| 2266 | DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); |
| 2267 | |
| 2268 | /** |
| 2269 | * cpufreq_update_util - Take a note about CPU utilization changes. |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2270 | * @rq: Runqueue to carry out the update for. |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2271 | * @flags: Update reason flags. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2272 | * |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2273 | * This function is called by the scheduler on the CPU whose utilization is |
| 2274 | * being updated. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2275 | * |
| 2276 | * It can only be called from RCU-sched read-side critical sections. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2277 | * |
| 2278 | * The way cpufreq is currently arranged requires it to evaluate the CPU |
| 2279 | * performance state (frequency/voltage) on a regular basis to prevent it from |
| 2280 | * being stuck in a completely inadequate performance level for too long. |
| 2281 | * That is not guaranteed to happen if the updates are only triggered from |
| 2282 | * CFS, though, because those may stop coming in while RT or deadline tasks |
| 2283 | * are active all the time (or there are RT and DL tasks only). |
| 2284 | * |
| 2285 | * As a workaround for that issue, this function is called by the RT and DL |
| 2286 | * sched classes to trigger extra cpufreq updates to prevent it from stalling, |
| 2287 | * but that really is a band-aid. Going forward it should be replaced with |
| 2288 | * solutions targeted more specifically at RT and DL tasks. |
| 2289 | */ |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2290 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2291 | { |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2292 | struct update_util_data *data; |
| 2293 | |
Vikram Mulukutla | ab968a4 | 2017-05-08 19:18:22 -0700 | [diff] [blame] | 2294 | #ifdef CONFIG_SCHED_WALT |
Syed Rameez Mustafa | e14a233 | 2017-05-19 14:42:35 -0700 | [diff] [blame] | 2295 | unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG | |
Puja Gupta | 8cd9db4 | 2017-09-21 10:58:56 -0700 | [diff] [blame] | 2296 | SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET; |
Syed Rameez Mustafa | e14a233 | 2017-05-19 14:42:35 -0700 | [diff] [blame] | 2297 | |
Vikram Mulukutla | 4b54aae | 2017-03-20 13:41:37 -0700 | [diff] [blame] | 2298 | /* |
| 2299 | * Only WALT update sites may pass. Skip if this window's load has |
Vikram Mulukutla | f668669 | 2017-06-06 11:58:27 -0700 | [diff] [blame] | 2300 | * already been reported, unless an exception flag forces an update. |
Vikram Mulukutla | 4b54aae | 2017-03-20 13:41:37 -0700 | [diff] [blame] | 2301 | */ |
Vikram Mulukutla | f668669 | 2017-06-06 11:58:27 -0700 | [diff] [blame] | 2302 | if (!(flags & SCHED_CPUFREQ_WALT)) |
| 2303 | return; |
Vikram Mulukutla | 4b54aae | 2017-03-20 13:41:37 -0700 | [diff] [blame] | 2304 | if (!sched_disable_window_stats && |
| 2305 | (rq->load_reported_window == rq->window_start) && |
Syed Rameez Mustafa | e14a233 | 2017-05-19 14:42:35 -0700 | [diff] [blame] | 2306 | !(flags & exception_flags)) |
Vikram Mulukutla | 4b54aae | 2017-03-20 13:41:37 -0700 | [diff] [blame] | 2307 | return; |
| 2308 | rq->load_reported_window = rq->window_start; |
| 2309 | #endif |
| 2310 | |
Vikram Mulukutla | bab4188 | 2017-05-09 17:49:47 -0700 | [diff] [blame] | 2311 | data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, |
| 2312 | cpu_of(rq))); |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 2313 | if (data) |
Stephen Boyd | 24c1812 | 2017-08-15 10:39:25 -0700 | [diff] [blame] | 2314 | data->func(data, ktime_get_ns(), flags); |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2315 | } |
| 2316 | |
| 2317 | static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) |
| 2318 | { |
| 2319 | if (cpu_of(rq) == smp_processor_id()) |
| 2320 | cpufreq_update_util(rq, flags); |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2321 | } |
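/*
 * Illustrative call site (not from this file): a WALT update point reporting
 * a utilization change for the local runqueue. Only the SCHED_CPUFREQ_WALT
 * flag (plus any exception flags) makes it past the filtering above.
 */
static inline void example_walt_freq_report(struct rq *rq)
{
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_WALT);
}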
| 2322 | #else |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 2323 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} |
| 2324 | static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {} |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 2325 | #endif /* CONFIG_CPU_FREQ */ |
Linus Torvalds | be53f58 | 2016-03-24 09:42:50 -0700 | [diff] [blame] | 2326 | |
Rafael J. Wysocki | 9bdcb44 | 2016-04-02 01:09:12 +0200 | [diff] [blame] | 2327 | #ifdef arch_scale_freq_capacity |
| 2328 | #ifndef arch_scale_freq_invariant |
| 2329 | #define arch_scale_freq_invariant() (true) |
| 2330 | #endif |
| 2331 | #else /* arch_scale_freq_capacity */ |
| 2332 | #define arch_scale_freq_invariant() (false) |
| 2333 | #endif |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2334 | |
Joonwoo Park | f7d6cd4 | 2017-01-17 15:19:43 -0800 | [diff] [blame] | 2335 | #ifdef CONFIG_SCHED_WALT |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2336 | |
| 2337 | static inline int cluster_first_cpu(struct sched_cluster *cluster) |
| 2338 | { |
| 2339 | return cpumask_first(&cluster->cpus); |
| 2340 | } |
| 2341 | |
| 2342 | struct related_thread_group { |
| 2343 | int id; |
| 2344 | raw_spinlock_t lock; |
| 2345 | struct list_head tasks; |
| 2346 | struct list_head list; |
| 2347 | struct sched_cluster *preferred_cluster; |
| 2348 | struct rcu_head rcu; |
| 2349 | u64 last_update; |
| 2350 | }; |
| 2351 | |
| 2352 | extern struct list_head cluster_head; |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2353 | extern struct sched_cluster *sched_cluster[NR_CPUS]; |
| 2354 | |
| 2355 | #define for_each_sched_cluster(cluster) \ |
| 2356 | list_for_each_entry_rcu(cluster, &cluster_head, list) |
| 2357 | |
| 2358 | #define WINDOW_STATS_RECENT 0 |
| 2359 | #define WINDOW_STATS_MAX 1 |
| 2360 | #define WINDOW_STATS_MAX_RECENT_AVG 2 |
| 2361 | #define WINDOW_STATS_AVG 3 |
| 2362 | #define WINDOW_STATS_INVALID_POLICY 4 |
| 2363 | |
| 2364 | #define SCHED_UPMIGRATE_MIN_NICE 15 |
| 2365 | #define EXITING_TASK_MARKER 0xdeaddead |
| 2366 | |
| 2367 | #define UP_MIGRATION 1 |
| 2368 | #define DOWN_MIGRATION 2 |
| 2369 | #define IRQLOAD_MIGRATION 3 |
| 2370 | |
| 2371 | extern struct mutex policy_mutex; |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2372 | extern unsigned int sched_disable_window_stats; |
| 2373 | extern unsigned int max_possible_freq; |
| 2374 | extern unsigned int min_max_freq; |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2375 | extern unsigned int max_possible_efficiency; |
| 2376 | extern unsigned int min_possible_efficiency; |
| 2377 | extern unsigned int max_capacity; |
| 2378 | extern unsigned int min_capacity; |
| 2379 | extern unsigned int max_load_scale_factor; |
| 2380 | extern unsigned int max_possible_capacity; |
| 2381 | extern unsigned int min_max_possible_capacity; |
| 2382 | extern unsigned int max_power_cost; |
| 2383 | extern unsigned int sched_init_task_load_windows; |
| 2384 | extern unsigned int up_down_migrate_scale_factor; |
| 2385 | extern unsigned int sysctl_sched_restrict_cluster_spill; |
| 2386 | extern unsigned int sched_pred_alert_load; |
| 2387 | extern struct sched_cluster init_cluster; |
| 2388 | extern unsigned int __read_mostly sched_short_sleep_task_threshold; |
| 2389 | extern unsigned int __read_mostly sched_long_cpu_selection_threshold; |
| 2390 | extern unsigned int __read_mostly sched_big_waker_task_load; |
| 2391 | extern unsigned int __read_mostly sched_small_wakee_task_load; |
| 2392 | extern unsigned int __read_mostly sched_spill_load; |
| 2393 | extern unsigned int __read_mostly sched_upmigrate; |
| 2394 | extern unsigned int __read_mostly sched_downmigrate; |
| 2395 | extern unsigned int __read_mostly sysctl_sched_spill_nr_run; |
| 2396 | extern unsigned int __read_mostly sched_load_granule; |
| 2397 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2398 | extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb); |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2399 | extern int update_preferred_cluster(struct related_thread_group *grp, |
| 2400 | struct task_struct *p, u32 old_load); |
| 2401 | extern void set_preferred_cluster(struct related_thread_group *grp); |
| 2402 | extern void add_new_task_to_grp(struct task_struct *new); |
| 2403 | extern unsigned int update_freq_aggregate_threshold(unsigned int threshold); |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2404 | |
| 2405 | #define NO_BOOST 0 |
| 2406 | #define FULL_THROTTLE_BOOST 1 |
| 2407 | #define CONSERVATIVE_BOOST 2 |
| 2408 | #define RESTRAINED_BOOST 3 |
| 2409 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2410 | static inline int cpu_capacity(int cpu) |
| 2411 | { |
| 2412 | return cpu_rq(cpu)->cluster->capacity; |
| 2413 | } |
| 2414 | |
| 2415 | static inline int cpu_max_possible_capacity(int cpu) |
| 2416 | { |
| 2417 | return cpu_rq(cpu)->cluster->max_possible_capacity; |
| 2418 | } |
| 2419 | |
| 2420 | static inline int cpu_load_scale_factor(int cpu) |
| 2421 | { |
| 2422 | return cpu_rq(cpu)->cluster->load_scale_factor; |
| 2423 | } |
| 2424 | |
| 2425 | static inline int cpu_efficiency(int cpu) |
| 2426 | { |
| 2427 | return cpu_rq(cpu)->cluster->efficiency; |
| 2428 | } |
| 2429 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2430 | static inline unsigned int cpu_min_freq(int cpu) |
| 2431 | { |
| 2432 | return cpu_rq(cpu)->cluster->min_freq; |
| 2433 | } |
| 2434 | |
| 2435 | static inline unsigned int cluster_max_freq(struct sched_cluster *cluster) |
| 2436 | { |
| 2437 | /* |
| 2438 | * The governor and the thermal driver don't see each other's |
| 2439 | * mitigation votes, so the cluster stores both and the min() of the |
| 2440 | * two is the effective cluster fmax. |
| 2441 | */ |
| 2442 | return min(cluster->max_mitigated_freq, cluster->max_freq); |
| 2443 | } |
| 2444 | |
| 2445 | static inline unsigned int cpu_max_freq(int cpu) |
| 2446 | { |
| 2447 | return cluster_max_freq(cpu_rq(cpu)->cluster); |
| 2448 | } |
| 2449 | |
| 2450 | static inline unsigned int cpu_max_possible_freq(int cpu) |
| 2451 | { |
| 2452 | return cpu_rq(cpu)->cluster->max_possible_freq; |
| 2453 | } |
| 2454 | |
Joonwoo Park | f7d6cd4 | 2017-01-17 15:19:43 -0800 | [diff] [blame] | 2455 | /* Track the max/min capacity currently possible across online CPUs */ |
| 2456 | static inline void __update_min_max_capacity(void) |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2457 | { |
Joonwoo Park | f7d6cd4 | 2017-01-17 15:19:43 -0800 | [diff] [blame] | 2458 | int i; |
| 2459 | int max_cap = 0, min_cap = INT_MAX; |
| 2460 | |
| 2461 | for_each_online_cpu(i) { |
| 2462 | max_cap = max(max_cap, cpu_capacity(i)); |
| 2463 | min_cap = min(min_cap, cpu_capacity(i)); |
| 2464 | } |
| 2465 | |
| 2466 | max_capacity = max_cap; |
| 2467 | min_capacity = min_cap; |
| 2468 | } |
| 2469 | |
| 2470 | /* |
| 2471 | * Return the load_scale_factor of a cpu relative to the most efficient |
| 2472 | * cpu, so that the most efficient cpu gets a factor of 1 (1024 fixed point). |
| 2473 | */ |
| 2474 | static inline unsigned long |
| 2475 | load_scale_cpu_efficiency(struct sched_cluster *cluster) |
| 2476 | { |
| 2477 | return DIV_ROUND_UP(1024 * max_possible_efficiency, |
| 2478 | cluster->efficiency); |
| 2479 | } |
| 2480 | |
| 2481 | /* |
| 2482 | * Return load_scale_factor of a cpu in reference to cpu with best max_freq |
| 2483 | * (max_possible_freq), so that one with best max_freq gets a load_scale_factor |
| 2484 | * of 1. |
| 2485 | */ |
| 2486 | static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster) |
| 2487 | { |
| 2488 | return DIV_ROUND_UP(1024 * max_possible_freq, |
| 2489 | cluster_max_freq(cluster)); |
| 2490 | } |
| 2491 | |
| 2492 | static inline int compute_load_scale_factor(struct sched_cluster *cluster) |
| 2493 | { |
| 2494 | int load_scale = 1024; |
| 2495 | |
| 2496 | /* |
| 2497 | * load_scale_factor accounts for the fact that task load is tracked |
| 2498 | * relative to the best-performing cpu. A task's load must be scaled |
| 2499 | * up by this factor to judge whether it fits on a slower (little) |
| 2500 | * cpu. |
| 2501 | */ |
| 2502 | load_scale *= load_scale_cpu_efficiency(cluster); |
| 2503 | load_scale >>= 10; |
| 2504 | |
| 2505 | load_scale *= load_scale_cpu_freq(cluster); |
| 2506 | load_scale >>= 10; |
| 2507 | |
| 2508 | return load_scale; |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2509 | } |
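/*
 * Worked example with made-up numbers: a little cluster at half the maximum
 * efficiency and half the best max_freq has load_scale_cpu_efficiency() ==
 * 2048 and load_scale_cpu_freq() == 2048, so compute_load_scale_factor()
 * yields ((1024 * 2048 >> 10) * 2048) >> 10 == 4096, i.e. task load is
 * inflated 4x when judged against that cluster.
 */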
| 2510 | |
| 2511 | static inline int cpu_max_power_cost(int cpu) |
| 2512 | { |
| 2513 | return cpu_rq(cpu)->cluster->max_power_cost; |
| 2514 | } |
| 2515 | |
| 2516 | static inline int cpu_min_power_cost(int cpu) |
| 2517 | { |
| 2518 | return cpu_rq(cpu)->cluster->min_power_cost; |
| 2519 | } |
| 2520 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2521 | static inline bool hmp_capable(void) |
| 2522 | { |
| 2523 | return max_possible_capacity != min_max_possible_capacity; |
| 2524 | } |
| 2525 | |
Pavankumar Kondeti | 00530928 | 2017-05-10 15:43:29 +0530 | [diff] [blame] | 2526 | static inline bool is_max_capacity_cpu(int cpu) |
| 2527 | { |
| 2528 | return cpu_max_possible_capacity(cpu) == max_possible_capacity; |
| 2529 | } |
| 2530 | |
Pavankumar Kondeti | 271e314 | 2017-06-23 14:20:24 +0530 | [diff] [blame] | 2531 | static inline bool is_min_capacity_cpu(int cpu) |
| 2532 | { |
| 2533 | return cpu_max_possible_capacity(cpu) == min_max_possible_capacity; |
| 2534 | } |
| 2535 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2536 | /* |
| 2537 | * 'load' is tracked relative to the "best cpu" at its best frequency. |
| 2538 | * Scale it for the given cpu, accounting for how much slower that |
| 2539 | * cpu is than the "best cpu". |
| 2540 | */ |
| 2541 | static inline u64 scale_load_to_cpu(u64 task_load, int cpu) |
| 2542 | { |
| 2543 | u64 lsf = cpu_load_scale_factor(cpu); |
| 2544 | |
| 2545 | if (lsf != 1024) { |
| 2546 | task_load *= lsf; |
| 2547 | task_load /= 1024; |
| 2548 | } |
| 2549 | |
| 2550 | return task_load; |
| 2551 | } |
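/*
 * E.g. (hypothetical numbers): with lsf == 4096, a task demand of 10ms is
 * treated as 40ms when evaluated against this cpu.
 */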
| 2552 | |
Joonwoo Park | f7d6cd4 | 2017-01-17 15:19:43 -0800 | [diff] [blame] | 2553 | /* |
| 2554 | * Return the 'capacity' of a cpu relative to the least efficient cpu, |
| 2555 | * such that the least efficient cpu gets a capacity of 1024. |
| 2556 | */ |
| 2557 | static inline unsigned long |
| 2558 | capacity_scale_cpu_efficiency(struct sched_cluster *cluster) |
| 2559 | { |
| 2560 | return (1024 * cluster->efficiency) / min_possible_efficiency; |
| 2561 | } |
| 2562 | |
| 2563 | /* |
| 2564 | * Return the 'capacity' of a cpu relative to the cpu with the lowest |
| 2565 | * max_freq (min_max_freq), such that that cpu gets a capacity of 1024. |
| 2566 | */ |
| 2567 | static inline unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster) |
| 2568 | { |
| 2569 | return (1024 * cluster_max_freq(cluster)) / min_max_freq; |
| 2570 | } |
| 2571 | |
| 2572 | static inline int compute_capacity(struct sched_cluster *cluster) |
| 2573 | { |
| 2574 | int capacity = 1024; |
| 2575 | |
| 2576 | capacity *= capacity_scale_cpu_efficiency(cluster); |
| 2577 | capacity >>= 10; |
| 2578 | |
| 2579 | capacity *= capacity_scale_cpu_freq(cluster); |
| 2580 | capacity >>= 10; |
| 2581 | |
| 2582 | return capacity; |
| 2583 | } |
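/*
 * Mirror of the load_scale example above, again with made-up numbers: a big
 * cluster at twice min_possible_efficiency and twice min_max_freq gets
 * ((1024 * 2048 >> 10) * 2048) >> 10 == 4096, i.e. four times the capacity
 * of the least capable cluster.
 */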
| 2584 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2585 | static inline unsigned int task_load(struct task_struct *p) |
| 2586 | { |
| 2587 | return p->ravg.demand; |
| 2588 | } |
| 2589 | |
Pavankumar Kondeti | fe1a696 | 2017-07-25 11:08:17 +0530 | [diff] [blame] | 2590 | static inline unsigned int task_pl(struct task_struct *p) |
| 2591 | { |
| 2592 | return p->ravg.pred_demand; |
| 2593 | } |
| 2594 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2595 | #define pct_to_real(tunable) \ |
| 2596 | (div64_u64((u64)tunable * (u64)max_task_load(), 100)) |
| 2597 | |
| 2598 | #define real_to_pct(tunable) \ |
| 2599 | (div64_u64((u64)tunable * (u64)100, (u64)max_task_load())) |
| 2600 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2601 | static inline bool task_in_related_thread_group(struct task_struct *p) |
| 2602 | { |
| 2603 | return rcu_access_pointer(p->grp) != NULL; |
| 2604 | } |
| 2605 | |
| 2606 | static inline |
| 2607 | struct related_thread_group *task_related_thread_group(struct task_struct *p) |
| 2608 | { |
| 2609 | return rcu_dereference(p->grp); |
| 2610 | } |
| 2611 | |
| 2612 | #define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand) |
| 2613 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2614 | /* Are the frequencies of two cpus synchronized with each other? */ |
| 2615 | static inline int same_freq_domain(int src_cpu, int dst_cpu) |
| 2616 | { |
| 2617 | struct rq *rq = cpu_rq(src_cpu); |
| 2618 | |
| 2619 | if (src_cpu == dst_cpu) |
| 2620 | return 1; |
| 2621 | |
| 2622 | return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask); |
| 2623 | } |
| 2624 | |
| 2625 | #define BOOST_KICK 0 |
| 2626 | #define CPU_RESERVED 1 |
| 2627 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2628 | extern int sched_boost(void); |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2629 | extern int preferred_cluster(struct sched_cluster *cluster, |
| 2630 | struct task_struct *p); |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2631 | extern struct sched_cluster *rq_cluster(struct rq *rq); |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2632 | extern void reset_task_stats(struct task_struct *p); |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2633 | extern void clear_top_tasks_bitmap(unsigned long *bitmap); |
| 2634 | |
| 2635 | #if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE) |
| 2636 | extern bool task_sched_boost(struct task_struct *p); |
| 2637 | extern int sync_cgroup_colocation(struct task_struct *p, bool insert); |
| 2638 | extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2); |
| 2639 | extern void update_cgroup_boost_settings(void); |
| 2640 | extern void restore_cgroup_boost_settings(void); |
| 2641 | |
| 2642 | #else |
| 2643 | static inline bool |
| 2644 | same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2) |
| 2645 | { |
| 2646 | return true; |
| 2647 | } |
| 2648 | |
| 2649 | static inline bool task_sched_boost(struct task_struct *p) |
| 2650 | { |
| 2651 | return true; |
| 2652 | } |
| 2653 | |
| 2654 | static inline void update_cgroup_boost_settings(void) { } |
| 2655 | static inline void restore_cgroup_boost_settings(void) { } |
| 2656 | #endif |
| 2657 | |
| 2658 | extern int alloc_related_thread_groups(void); |
| 2659 | |
| 2660 | extern unsigned long all_cluster_ids[]; |
| 2661 | |
Joonwoo Park | e77a201 | 2016-12-06 18:12:43 -0800 | [diff] [blame] | 2662 | extern void check_for_migration(struct rq *rq, struct task_struct *p); |
| 2663 | |
| 2664 | static inline int is_reserved(int cpu) |
| 2665 | { |
| 2666 | struct rq *rq = cpu_rq(cpu); |
| 2667 | |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 2668 | return test_bit(CPU_RESERVED, &rq->walt_flags); |
Joonwoo Park | e77a201 | 2016-12-06 18:12:43 -0800 | [diff] [blame] | 2669 | } |
| 2670 | |
| 2671 | static inline int mark_reserved(int cpu) |
| 2672 | { |
| 2673 | struct rq *rq = cpu_rq(cpu); |
| 2674 | |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 2675 | return test_and_set_bit(CPU_RESERVED, &rq->walt_flags); |
Joonwoo Park | e77a201 | 2016-12-06 18:12:43 -0800 | [diff] [blame] | 2676 | } |
| 2677 | |
| 2678 | static inline void clear_reserved(int cpu) |
| 2679 | { |
| 2680 | struct rq *rq = cpu_rq(cpu); |
| 2681 | |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 2682 | clear_bit(CPU_RESERVED, &rq->walt_flags); |
Joonwoo Park | e77a201 | 2016-12-06 18:12:43 -0800 | [diff] [blame] | 2683 | } |
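/*
 * Illustrative reservation pattern (hypothetical helper): claim a CPU for an
 * incoming migration and back off if another CPU won the race.
 */
static inline bool example_try_reserve_cpu(int cpu)
{
	if (mark_reserved(cpu))		/* bit was already set by someone else */
		return false;

	/* ... queue migration work targeting this CPU ... */

	clear_reserved(cpu);		/* done; allow others to reserve */
	return true;
}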
| 2684 | |
Joonwoo Park | 84a8088 | 2017-02-03 11:15:31 -0800 | [diff] [blame] | 2685 | static inline bool |
Joonwoo Park | 84a8088 | 2017-02-03 11:15:31 -0800 | [diff] [blame] | 2686 | task_in_cum_window_demand(struct rq *rq, struct task_struct *p) |
| 2687 | { |
Pavankumar Kondeti | 0cebff0 | 2017-07-21 16:28:12 +0530 | [diff] [blame] | 2688 | return cpu_of(rq) == task_cpu(p) && (p->on_rq || p->last_sleep_ts >= |
| 2689 | rq->window_start); |
Joonwoo Park | 84a8088 | 2017-02-03 11:15:31 -0800 | [diff] [blame] | 2690 | } |
| 2691 | |
Pavankumar Kondeti | 0cebff0 | 2017-07-21 16:28:12 +0530 | [diff] [blame] | 2692 | static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta) |
Joonwoo Park | 84a8088 | 2017-02-03 11:15:31 -0800 | [diff] [blame] | 2693 | { |
| 2694 | rq->cum_window_demand += delta; |
Pavankumar Kondeti | 0cebff0 | 2017-07-21 16:28:12 +0530 | [diff] [blame] | 2695 | if (unlikely((s64)rq->cum_window_demand < 0)) |
| 2696 | rq->cum_window_demand = 0; |
Joonwoo Park | 84a8088 | 2017-02-03 11:15:31 -0800 | [diff] [blame] | 2697 | } |
| 2698 | |
Vikram Mulukutla | d0ba188 | 2017-02-03 12:56:26 -0800 | [diff] [blame] | 2699 | extern void update_cpu_cluster_capacity(const cpumask_t *cpus); |
| 2700 | |
| 2701 | extern unsigned long thermal_cap(int cpu); |
| 2702 | |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 2703 | extern void clear_walt_request(int cpu); |
Syed Rameez Mustafa | 20acfe7 | 2017-01-30 09:35:46 +0530 | [diff] [blame] | 2704 | |
| 2705 | extern int got_boost_kick(void); |
| 2706 | extern void clear_boost_kick(int cpu); |
| 2707 | extern enum sched_boost_policy sched_boost_policy(void); |
| 2708 | extern void sched_boost_parse_dt(void); |
Syed Rameez Mustafa | 25de011 | 2017-05-10 12:09:15 -0700 | [diff] [blame] | 2709 | extern void clear_ed_task(struct task_struct *p, struct rq *rq); |
| 2710 | extern bool early_detection_notify(struct rq *rq, u64 wallclock); |
Syed Rameez Mustafa | 20acfe7 | 2017-01-30 09:35:46 +0530 | [diff] [blame] | 2711 | |
Pavankumar Kondeti | c5927f1 | 2017-10-11 12:36:12 +0530 | [diff] [blame^] | 2712 | static inline unsigned int power_cost(int cpu, bool max) |
Pavankumar Kondeti | f9026cd | 2017-06-07 15:03:32 +0530 | [diff] [blame] | 2713 | { |
Pavankumar Kondeti | c5927f1 | 2017-10-11 12:36:12 +0530 | [diff] [blame^] | 2714 | struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1]; |
| 2715 | |
| 2716 | if (!sge || !sge->nr_cap_states) |
| 2717 | return cpu_max_possible_capacity(cpu); |
| 2718 | |
| 2719 | if (max) |
| 2720 | return sge->cap_states[sge->nr_cap_states - 1].power; |
| 2721 | else |
| 2722 | return sge->cap_states[0].power; |
Pavankumar Kondeti | f9026cd | 2017-06-07 15:03:32 +0530 | [diff] [blame] | 2723 | } |
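/*
 * Illustrative comparison (hypothetical helper): pick whichever of two CPUs
 * is cheaper at its maximum capacity state, using power_cost() as defined
 * above.
 */
static inline int example_cheaper_cpu(int cpu1, int cpu2)
{
	return power_cost(cpu1, true) <= power_cost(cpu2, true) ? cpu1 : cpu2;
}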
Pavankumar Kondeti | f9026cd | 2017-06-07 15:03:32 +0530 | [diff] [blame] | 2724 | |
Pavankumar Kondeti | c5927f1 | 2017-10-11 12:36:12 +0530 | [diff] [blame^] | 2725 | extern void walt_sched_energy_populated_callback(void); |
| 2726 | |
Joonwoo Park | f7d6cd4 | 2017-01-17 15:19:43 -0800 | [diff] [blame] | 2727 | #else /* CONFIG_SCHED_WALT */ |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2728 | |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 2729 | struct walt_sched_stats; |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2730 | struct related_thread_group; |
| 2731 | struct sched_cluster; |
| 2732 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2733 | static inline bool task_sched_boost(struct task_struct *p) |
| 2734 | { |
| 2735 | return true; |
| 2736 | } |
| 2737 | |
Joonwoo Park | e77a201 | 2016-12-06 18:12:43 -0800 | [diff] [blame] | 2738 | static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } |
| 2739 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2740 | static inline int sched_boost(void) |
| 2741 | { |
| 2742 | return 0; |
| 2743 | } |
| 2744 | |
Pavankumar Kondeti | 271e314 | 2017-06-23 14:20:24 +0530 | [diff] [blame] | 2745 | static inline bool hmp_capable(void) { return false; } |
Pavankumar Kondeti | 00530928 | 2017-05-10 15:43:29 +0530 | [diff] [blame] | 2746 | static inline bool is_max_capacity_cpu(int cpu) { return true; } |
Pavankumar Kondeti | 271e314 | 2017-06-23 14:20:24 +0530 | [diff] [blame] | 2747 | static inline bool is_min_capacity_cpu(int cpu) { return true; } |
Pavankumar Kondeti | 00530928 | 2017-05-10 15:43:29 +0530 | [diff] [blame] | 2748 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2749 | static inline int |
| 2750 | preferred_cluster(struct sched_cluster *cluster, struct task_struct *p) |
| 2751 | { |
| 2752 | return 1; |
| 2753 | } |
| 2754 | |
| 2755 | static inline struct sched_cluster *rq_cluster(struct rq *rq) |
| 2756 | { |
| 2757 | return NULL; |
| 2758 | } |
| 2759 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2760 | static inline u64 scale_load_to_cpu(u64 load, int cpu) |
| 2761 | { |
| 2762 | return load; |
| 2763 | } |
| 2764 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2765 | static inline int cpu_capacity(int cpu) |
| 2766 | { |
| 2767 | return SCHED_CAPACITY_SCALE; |
| 2768 | } |
| 2769 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2770 | static inline void set_preferred_cluster(struct related_thread_group *grp) { } |
| 2771 | |
| 2772 | static inline bool task_in_related_thread_group(struct task_struct *p) |
| 2773 | { |
| 2774 | return false; |
| 2775 | } |
| 2776 | |
| 2777 | static inline |
| 2778 | struct related_thread_group *task_related_thread_group(struct task_struct *p) |
| 2779 | { |
| 2780 | return NULL; |
| 2781 | } |
| 2782 | |
| 2783 | static inline u32 task_load(struct task_struct *p) { return 0; } |
Pavankumar Kondeti | fe1a696 | 2017-07-25 11:08:17 +0530 | [diff] [blame] | 2784 | static inline u32 task_pl(struct task_struct *p) { return 0; } |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2785 | |
| 2786 | static inline int update_preferred_cluster(struct related_thread_group *grp, |
| 2787 | struct task_struct *p, u32 old_load) |
| 2788 | { |
| 2789 | return 0; |
| 2790 | } |
| 2791 | |
| 2792 | static inline void add_new_task_to_grp(struct task_struct *new) {} |
| 2793 | |
| 2794 | #define PRED_DEMAND_DELTA (0) |
| 2795 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2796 | static inline int same_freq_domain(int src_cpu, int dst_cpu) |
| 2797 | { |
| 2798 | return 1; |
| 2799 | } |
| 2800 | |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2801 | static inline void clear_reserved(int cpu) { } |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2802 | static inline int alloc_related_thread_groups(void) { return 0; } |
| 2803 | |
| 2804 | #define trace_sched_cpu_load(...) |
| 2805 | #define trace_sched_cpu_load_lb(...) |
| 2806 | #define trace_sched_cpu_load_cgroup(...) |
| 2807 | #define trace_sched_cpu_load_wakeup(...) |
| 2808 | |
Pavankumar Kondeti | 0cebff0 | 2017-07-21 16:28:12 +0530 | [diff] [blame] | 2809 | static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta) { } |
Joonwoo Park | 84a8088 | 2017-02-03 11:15:31 -0800 | [diff] [blame] | 2810 | |
Vikram Mulukutla | d0ba188 | 2017-02-03 12:56:26 -0800 | [diff] [blame] | 2811 | static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { } |
| 2812 | |
| 2813 | #ifdef CONFIG_SMP |
| 2814 | static inline unsigned long thermal_cap(int cpu) |
| 2815 | { |
| 2816 | return cpu_rq(cpu)->cpu_capacity_orig; |
| 2817 | } |
| 2818 | #endif |
| 2819 | |
Pavankumar Kondeti | 84f72d7 | 2017-07-20 11:00:45 +0530 | [diff] [blame] | 2820 | static inline void clear_walt_request(int cpu) { } |
Syed Rameez Mustafa | 20acfe7 | 2017-01-30 09:35:46 +0530 | [diff] [blame] | 2821 | |
| 2822 | static inline int got_boost_kick(void) |
| 2823 | { |
| 2824 | return 0; |
| 2825 | } |
| 2826 | |
| 2827 | static inline void clear_boost_kick(int cpu) { } |
| 2828 | |
| 2829 | static inline enum sched_boost_policy sched_boost_policy(void) |
| 2830 | { |
| 2831 | return SCHED_BOOST_NONE; |
| 2832 | } |
| 2833 | |
| 2834 | static inline void sched_boost_parse_dt(void) { } |
| 2835 | |
Syed Rameez Mustafa | 25de011 | 2017-05-10 12:09:15 -0700 | [diff] [blame] | 2836 | static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { } |
| 2837 | |
| 2838 | static inline bool early_detection_notify(struct rq *rq, u64 wallclock) |
| 2839 | { |
| 2840 | return false; |
| 2841 | } |
| 2842 | |
Pavankumar Kondeti | c5927f1 | 2017-10-11 12:36:12 +0530 | [diff] [blame^] | 2843 | static inline unsigned int power_cost(int cpu, bool max) |
Pavankumar Kondeti | f9026cd | 2017-06-07 15:03:32 +0530 | [diff] [blame] | 2844 | { |
| 2845 | return SCHED_CAPACITY_SCALE; |
| 2846 | } |
| 2847 | |
Pavankumar Kondeti | c5927f1 | 2017-10-11 12:36:12 +0530 | [diff] [blame^] | 2848 | static inline void walt_sched_energy_populated_callback(void) { } |
| 2849 | |
Joonwoo Park | f7d6cd4 | 2017-01-17 15:19:43 -0800 | [diff] [blame] | 2850 | #endif /* CONFIG_SCHED_WALT */ |
Vikram Mulukutla | d056dbc | 2017-02-07 18:58:07 -0800 | [diff] [blame] | 2851 | |
Joonwoo Park | c5ddd4a | 2017-01-12 17:56:46 -0800 | [diff] [blame] | 2852 | static inline bool energy_aware(void) |
| 2853 | { |
| 2854 | return sched_feat(ENERGY_AWARE); |
| 2855 | } |
Joonwoo Park | a5e601e | 2017-09-20 16:13:03 -0700 | [diff] [blame] | 2856 | |
| 2857 | #ifdef CONFIG_SCHED_CORE_ROTATE |
| 2858 | struct find_first_cpu_bit_env { |
| 2859 | unsigned long *avoid_prev_cpu_last; |
| 2860 | int *rotate_cpu_start; |
| 2861 | int interval; |
| 2862 | spinlock_t *rotate_lock; |
| 2863 | }; |
| 2864 | |
| 2865 | int |
| 2866 | find_first_cpu_bit(struct task_struct *p, const cpumask_t *search_cpus, |
| 2867 | struct sched_group *sg_target, bool *avoid_prev_cpu, |
| 2868 | bool *do_rotate, struct find_first_cpu_bit_env *env); |
Pavankumar Kondeti | c72d3a4 | 2017-11-14 15:35:35 +0530 | [diff] [blame] | 2869 | #else |
| 2870 | #define find_first_cpu_bit(...) -1 |
Joonwoo Park | a5e601e | 2017-09-20 16:13:03 -0700 | [diff] [blame] | 2871 | #endif |