/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __WALT_H
#define __WALT_H

#ifdef CONFIG_SCHED_WALT

#include <linux/sched/sysctl.h>

#define WINDOW_STATS_RECENT		0
#define WINDOW_STATS_MAX		1
#define WINDOW_STATS_MAX_RECENT_AVG	2
#define WINDOW_STATS_AVG		3
#define WINDOW_STATS_INVALID_POLICY	4
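
/*
 * Illustrative sketch (an assumption, not part of this header): how the
 * policies above are typically applied to a task's ravg sum history to
 * pick its demand; the authoritative logic lives in walt.c.
 * WINDOW_STATS_INVALID_POLICY is the first out-of-range value.
 */
static inline u32 __example_windowed_demand(const u32 *hist, u32 hist_size,
					    u32 recent, u32 policy)
{
	u64 sum = 0;
	u32 max = 0, avg, i;

	for (i = 0; i < hist_size; i++) {
		sum += hist[i];
		if (hist[i] > max)
			max = hist[i];
	}
	avg = div64_u64(sum, hist_size);

	switch (policy) {
	case WINDOW_STATS_RECENT:
		return recent;		/* last window only */
	case WINDOW_STATS_MAX:
		return max;		/* peak of the history */
	case WINDOW_STATS_AVG:
		return avg;		/* mean of the history */
	case WINDOW_STATS_MAX_RECENT_AVG:
		return avg > recent ? avg : recent;
	default:
		/* policies >= WINDOW_STATS_INVALID_POLICY are rejected */
		return recent;
	}
}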

#define EXITING_TASK_MARKER	0xdeaddead

#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK	0
#define FREQ_REPORT_CPU_LOAD			1
#define FREQ_REPORT_TOP_TASK			2

#define for_each_related_thread_group(grp) \
	list_for_each_entry(grp, &active_related_thread_groups, list)

#define SCHED_NEW_TASK_WINDOWS 5

extern unsigned int sched_ravg_window;
extern unsigned int max_possible_efficiency;
extern unsigned int min_possible_efficiency;
extern unsigned int max_possible_freq;
extern unsigned int sched_major_task_runtime;
extern unsigned int __read_mostly sched_load_granule;

extern struct mutex cluster_lock;
extern rwlock_t related_thread_group_lock;
extern __read_mostly unsigned int sched_ravg_hist_size;
extern __read_mostly unsigned int sched_freq_aggregate;
extern __read_mostly int sched_freq_aggregate_threshold;
extern __read_mostly unsigned int sched_window_stats_policy;
extern __read_mostly unsigned int sched_group_upmigrate;
extern __read_mostly unsigned int sched_group_downmigrate;

extern struct sched_cluster init_cluster;

extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
			     u64 wallclock, u64 irqtime);

extern unsigned int nr_eligible_big_tasks(int cpu);

static inline void
inc_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	if (p->misfit)
		stats->nr_big_tasks++;
}

static inline void
dec_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	if (p->misfit)
		stats->nr_big_tasks--;

	BUG_ON(stats->nr_big_tasks < 0);
}

static inline void
walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
{
	if (sched_disable_window_stats)
		return;

	sched_update_nr_prod(cpu_of(rq), 0, true);
	rq->walt_stats.nr_big_tasks += inc ? delta : -delta;

	BUG_ON(rq->walt_stats.nr_big_tasks < 0);
}

static inline void
fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
			      s64 task_load_delta, s64 pred_demand_delta)
{
	if (sched_disable_window_stats)
		return;

	stats->cumulative_runnable_avg += task_load_delta;
	BUG_ON((s64)stats->cumulative_runnable_avg < 0);

	stats->pred_demands_sum += pred_demand_delta;
	BUG_ON((s64)stats->pred_demands_sum < 0);
}

static inline void
walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand,
				      p->ravg.pred_demand);

	/*
	 * Add a task's contribution to the cumulative window demand when:
	 *
	 * (1) the task is enqueued with on_rq = 1, i.e. migration or a
	 *     prio/cgroup/class change, or
	 * (2) the task is waking for the first time in this window.
	 */
	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
		walt_fixup_cum_window_demand(rq, p->ravg.demand);
}

static inline void
walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	fixup_cumulative_runnable_avg(&rq->walt_stats, -(s64)p->ravg.demand,
				      -(s64)p->ravg.pred_demand);

	/*
	 * p->on_rq is still 1 here for sleeping tasks, so check whether
	 * the task is migrating, or is being dequeued in TASK_RUNNING
	 * state for a prio/cgroup/class change.
	 */
	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
		walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
}

extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
					  u32 new_task_load,
					  u32 new_pred_demand);
extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
extern void init_new_task_load(struct task_struct *p);
extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
		     u64 wallclock);
void walt_fixup_cumulative_runnable_avg(struct rq *rq, struct task_struct *p,
					u64 new_task_load);

extern bool do_pl_notif(struct rq *rq);

#define SCHED_HIGH_IRQ_TIMEOUT 3
static inline u64 sched_irqload(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	s64 delta;

	delta = get_jiffies_64() - rq->irqload_ts;
	/*
	 * The current context can be preempted by an irq, and rq->irqload_ts
	 * can be updated from irq context, so delta can be negative. That is
	 * fine: a negative delta just means an irq occurred very recently,
	 * and we can safely return the average irqload.
	 */
	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
		return rq->avg_irqload;
	else
		return 0;
}
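
/*
 * Note (illustrative arithmetic, not from the source): the timeout above
 * is in jiffies, so 3 jiffies is 30ms at HZ=100 or 12ms at HZ=250; a CPU
 * reports zero irqload once that long has passed without irq activity.
 */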

static inline int sched_cpu_high_irqload(int cpu)
{
	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
}

static inline int exiting_task(struct task_struct *p)
{
	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
}

static inline struct sched_cluster *cpu_cluster(int cpu)
{
	return cpu_rq(cpu)->cluster;
}

static inline u64
scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
{
	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
}
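
/*
 * Worked example for scale_load_to_freq() (illustrative numbers, not from
 * the source): 8ms of busy time observed while running at 1600MHz scales
 * to 8 * 1600 / 800 = 16ms of demand at 800MHz, since the same work takes
 * proportionally longer at the lower frequency.
 */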

static inline bool is_new_task(struct task_struct *p)
{
	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
}

static inline void clear_top_tasks_table(u8 *table)
{
	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
}

extern void update_cluster_load_subtractions(struct task_struct *p,
					     int cpu, u64 ws, bool new_task);
extern void sched_account_irqstart(int cpu, struct task_struct *curr,
				   u64 wallclock);

static inline unsigned int max_task_load(void)
{
	return sched_ravg_window;
}

static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
{
	return div64_u64(cycles, period);
}
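
/*
 * Assumption about the intended use of cpu_cycles_to_freq(): @cycles and
 * @period are deltas sampled over the same interval, so the quotient is a
 * frequency in cycles per unit of @period; the caller is expected to pick
 * units that yield the frequency scale it wants (e.g. kHz).
 */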

static inline unsigned int cpu_cur_freq(int cpu)
{
	return cpu_rq(cpu)->cluster->cur_freq;
}

static inline void
move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
{
	struct list_head *first, *last;

	first = src->next;
	last = src->prev;

	if (sync_rcu) {
		INIT_LIST_HEAD_RCU(src);
		synchronize_rcu();
	}

	first->prev = dst;
	dst->prev = last;
	last->next = dst;

	/* Ensure list sanity before making the head visible to all CPUs. */
	smp_mb();
	dst->next = first;
}
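
/*
 * Illustrative caller of move_list() (an assumption mirroring how cluster
 * updates are expected to work): splice a privately built cluster list
 * onto the globally visible cluster_head.
 *
 *	LIST_HEAD(new_head);
 *
 *	...insert each sched_cluster into new_head...
 *	move_list(&cluster_head, &new_head, false);
 *
 * Pass sync_rcu = true only when @src was already visible to RCU readers
 * and must be drained safely before being respliced.
 */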

extern void reset_task_stats(struct task_struct *p);
extern void update_cluster_topology(void);

extern struct list_head cluster_head;
#define for_each_sched_cluster(cluster) \
	list_for_each_entry_rcu(cluster, &cluster_head, list)

extern void init_clusters(void);

extern void clear_top_tasks_bitmap(unsigned long *bitmap);

extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				  u64 delta, u64 wallclock);

static inline void assign_cluster_ids(struct list_head *head)
{
	struct sched_cluster *cluster;
	int pos = 0;

	list_for_each_entry(cluster, head, list) {
		cluster->id = pos;
		sched_cluster[pos++] = cluster;
	}
}

static inline int same_cluster(int src_cpu, int dst_cpu)
{
	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
}

void walt_irq_work(struct irq_work *irq_work);

void walt_sched_init(struct rq *rq);

extern int __read_mostly min_power_cpu;
static inline int walt_start_cpu(int prev_cpu)
{
	return sysctl_sched_is_big_little ? prev_cpu : min_power_cpu;
}

static inline void walt_update_last_enqueue(struct task_struct *p)
{
	p->last_enqueued_ts = sched_ktime_clock();
}
extern void walt_rotate_work_init(void);
extern void walt_rotation_checkpoint(int nr_big);
extern unsigned int walt_rotation_enabled;

#else /* CONFIG_SCHED_WALT */

static inline void walt_sched_init(struct rq *rq) { }
static inline void walt_rotate_work_init(void) { }
static inline void walt_rotation_checkpoint(int nr_big) { }
static inline void walt_update_last_enqueue(struct task_struct *p) { }
static inline void walt_fixup_cumulative_runnable_avg(struct rq *rq,
						      struct task_struct *p,
						      u64 new_task_load) { }

static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
				    int event, u64 wallclock, u64 irqtime) { }

static inline void walt_inc_cumulative_runnable_avg(struct rq *rq,
						    struct task_struct *p)
{
}

static inline unsigned int nr_eligible_big_tasks(int cpu)
{
	return 0;
}

static inline void walt_adjust_nr_big_tasks(struct rq *rq,
					    int delta, bool inc)
{
}

static inline void inc_nr_big_task(struct walt_sched_stats *stats,
				   struct task_struct *p)
{
}

static inline void dec_nr_big_task(struct walt_sched_stats *stats,
				   struct task_struct *p)
{
}

static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
						    struct task_struct *p)
{
}

static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void init_new_task_load(struct task_struct *p) { }

static inline void mark_task_starting(struct task_struct *p) { }
static inline void set_window_start(struct rq *rq) { }
static inline int sched_cpu_high_irqload(int cpu) { return 0; }

static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
					  u64 wallclock)
{
}

static inline void update_cluster_topology(void) { }
static inline void init_clusters(void) { }
static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
					 u64 delta, u64 wallclock)
{
}

static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
static inline bool do_pl_notif(struct rq *rq) { return false; }

static inline void
inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }

static inline void
dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { }

static inline void
fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
			      u32 new_task_load, u32 new_pred_demand)
{
}

static inline int walt_start_cpu(int prev_cpu)
{
	return prev_cpu;
}

static inline u64 sched_irqload(int cpu)
{
	return 0;
}
#endif /* CONFIG_SCHED_WALT */

#define walt_cpu_high_irqload(cpu) sched_cpu_high_irqload(cpu)

#endif /* __WALT_H */