/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

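/*
 * Interface to Window Assisted Load Tracking (WALT): scheduler hooks that
 * maintain window-based task demand and CPU busy-time statistics. The real
 * implementations live in walt.c when CONFIG_SCHED_WALT is enabled;
 * otherwise the no-op stubs below let call sites stay #ifdef-free.
 */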
#ifndef __WALT_H
#define __WALT_H

#ifdef CONFIG_SCHED_WALT

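/*
 * Core accounting hooks: walt_update_task_ravg() folds a scheduler event
 * (wakeup, migration, etc.) into the task's windowed load at the given
 * wallclock time; the inc/dec helpers add or remove a task's demand
 * from the runqueue's cumulative runnable average on enqueue/dequeue.
 */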
void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
			   u64 wallclock, u64 irqtime);
void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);

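/*
 * Task and CPU lifecycle hooks: busy-time fixup when a task migrates,
 * initial demand for newly forked tasks, and per-CPU window/efficiency
 * setup. walt_ktime_clock() is the time base WALT stamps events with.
 */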
void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
void walt_init_new_task_load(struct task_struct *p);
void walt_mark_task_starting(struct task_struct *p);
void walt_set_window_start(struct rq *rq);
void walt_migrate_sync_cpu(int cpu);
void walt_init_cpu_efficiency(void);
u64 walt_ktime_clock(void);
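/*
 * IRQ-time accounting: walt_account_irqtime() charges interrupt time to a
 * CPU, walt_irqload() reports the accumulated IRQ load, and
 * walt_cpu_high_irqload() tells callers whether that load is high enough
 * to treat the CPU as busy with interrupts.
 */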
void walt_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
			  u64 wallclock);

u64 walt_irqload(int cpu);
int walt_cpu_high_irqload(int cpu);

#else /* CONFIG_SCHED_WALT */

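/*
 * No-op stubs: with CONFIG_SCHED_WALT disabled, core scheduler code can
 * still invoke the hooks unconditionally, e.g. (illustrative only):
 *
 *	walt_inc_cumulative_runnable_avg(rq, p);
 *
 * and the compiler optimizes the call away.
 */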
static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
					 int event, u64 wallclock, u64 irqtime) { }
static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void walt_init_new_task_load(struct task_struct *p) { }
static inline void walt_mark_task_starting(struct task_struct *p) { }
static inline void walt_set_window_start(struct rq *rq) { }
static inline void walt_migrate_sync_cpu(int cpu) { }
static inline void walt_init_cpu_efficiency(void) { }
static inline u64 walt_ktime_clock(void) { return 0; }

#define walt_cpu_high_irqload(cpu) false

#endif /* CONFIG_SCHED_WALT */

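/*
 * Per-cfs_rq variants, needed only when CFS bandwidth control can throttle
 * groups: they keep a cumulative runnable figure per cfs_rq so the group's
 * contribution can be adjusted when it is throttled or unthrottled.
 */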
#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SCHED_WALT)
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
					  struct task_struct *p);
void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
					  struct task_struct *p);
#else
static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
							struct task_struct *p) { }
static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
							struct task_struct *p) { }
#endif

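/* Defined in walt.c; when true, WALT statistics are not used by the scheduler. */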
extern bool walt_disabled;

#endif