/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
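
/*
 * walt.h - internal interfaces for Window Assisted Load Tracking (WALT),
 * the window-based task demand and CPU busy-time accounting used by the
 * scheduler. When CONFIG_SCHED_WALT is disabled, the same API is stubbed
 * out as no-ops at the bottom of this file.
 */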

#ifndef __WALT_H
#define __WALT_H

#ifdef CONFIG_SCHED_WALT

#include <linux/sched/sysctl.h>

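/*
 * Window statistics policies: how a task's demand is derived from its
 * recorded history of per-window runtimes (most recent window, maximum
 * window, max of the recent window and the history average, or the plain
 * average). WINDOW_STATS_INVALID_POLICY bounds the valid values of the
 * sched_window_stats_policy tunable declared below.
 */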
#define WINDOW_STATS_RECENT		0
#define WINDOW_STATS_MAX		1
#define WINDOW_STATS_MAX_RECENT_AVG	2
#define WINDOW_STATS_AVG		3
#define WINDOW_STATS_INVALID_POLICY	4

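/*
 * Tasks that are exiting have this marker written into
 * ravg.sum_history[0]; exiting_task() below tests for it so that the
 * stale window statistics of dying tasks can be ignored.
 */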
#define EXITING_TASK_MARKER	0xdeaddead

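/*
 * Load reporting policy values: report the larger of CPU load and
 * top-task load, CPU load only, or top-task load only when WALT feeds
 * busy time to the frequency guidance path.
 */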
#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK	0
#define FREQ_REPORT_CPU_LOAD			1
#define FREQ_REPORT_TOP_TASK			2

#define for_each_related_thread_group(grp) \
	list_for_each_entry(grp, &active_related_thread_groups, list)

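/*
 * A task is considered "new" until it has seen this many full windows;
 * see is_new_task() below.
 */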
#define SCHED_NEW_TASK_WINDOWS 5

extern unsigned int sched_ravg_window;
extern unsigned int max_possible_efficiency;
extern unsigned int min_possible_efficiency;
extern unsigned int max_possible_freq;
extern unsigned int sched_major_task_runtime;
extern unsigned int __read_mostly sched_load_granule;

extern struct mutex cluster_lock;
extern rwlock_t related_thread_group_lock;
extern __read_mostly unsigned int sched_ravg_hist_size;
extern __read_mostly unsigned int sched_freq_aggregate;
extern __read_mostly int sched_freq_aggregate_threshold;
extern __read_mostly unsigned int sched_window_stats_policy;
extern __read_mostly unsigned int sched_group_upmigrate;
extern __read_mostly unsigned int sched_group_downmigrate;

extern struct sched_cluster init_cluster;

extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
			     u64 wallclock, u64 irqtime);

extern unsigned int nr_eligible_big_tasks(int cpu);

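/*
 * Big-task accounting: a task marked as a misfit for its CPU (p->misfit)
 * counts towards walt_sched_stats.nr_big_tasks. inc/dec_nr_big_task()
 * adjust the count for a single task, walt_adjust_nr_big_tasks() applies
 * a batched delta to a runqueue, and all of them bail out while window
 * statistics are disabled (sched_disable_window_stats).
 */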
static inline void
inc_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	if (p->misfit)
		stats->nr_big_tasks++;
}

static inline void
dec_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	if (p->misfit)
		stats->nr_big_tasks--;

	BUG_ON(stats->nr_big_tasks < 0);
}

static inline void
walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
{
	if (sched_disable_window_stats)
		return;

	sched_update_nr_prod(cpu_of(rq), 0, true);
	rq->walt_stats.nr_big_tasks += inc ? delta : -delta;

	BUG_ON(rq->walt_stats.nr_big_tasks < 0);
}

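/*
 * Apply signed deltas to the aggregated runnable demand and predicted
 * demand sums. The BUG_ON()s catch accounting underflow, i.e. removing
 * more load than was previously added.
 */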
static inline void
fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
			      s64 task_load_delta, s64 pred_demand_delta)
{
	if (sched_disable_window_stats)
		return;

	stats->cumulative_runnable_avg += task_load_delta;
	BUG_ON((s64)stats->cumulative_runnable_avg < 0);

	stats->pred_demands_sum += pred_demand_delta;
	BUG_ON((s64)stats->pred_demands_sum < 0);
}

static inline void
walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand,
				      p->ravg.pred_demand);

	/*
	 * Add the task's contribution to the cumulative window demand when
	 *
	 * (1) the task is enqueued with on_rq = 1, i.e. for a migration or
	 *     a prio/cgroup/class change, or
	 * (2) the task is waking up for the first time in this window.
	 */
	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
		walt_fixup_cum_window_demand(rq, p->ravg.demand);
}

static inline void
walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	fixup_cumulative_runnable_avg(&rq->walt_stats, -(s64)p->ravg.demand,
				      -(s64)p->ravg.pred_demand);

	/*
	 * on_rq is still 1 here even for a task being dequeued to sleep,
	 * so it cannot be used to filter out sleeps. Instead, only remove
	 * the contribution when the task is migrating or is being dequeued
	 * while still RUNNING for a prio/cgroup/class change.
	 */
	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
		walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
}

extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
					  u32 new_task_load,
					  u32 new_pred_demand);
extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
extern void init_new_task_load(struct task_struct *p);
extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
		     u64 wallclock);
void walt_fixup_cumulative_runnable_avg(struct rq *rq, struct task_struct *p,
					u64 new_task_load);

extern bool do_pl_notif(struct rq *rq);

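/*
 * IRQ load tracking: SCHED_HIGH_IRQ_TIMEOUT is in jiffies. A CPU's
 * average IRQ load is only considered meaningful if an IRQ was accounted
 * on it within that window; otherwise sched_irqload() reports zero.
 */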
#define SCHED_HIGH_IRQ_TIMEOUT 3
static inline u64 sched_irqload(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	s64 delta;

	delta = get_jiffies_64() - rq->irqload_ts;
	/*
	 * The current context can be preempted by an IRQ, and rq->irqload_ts
	 * may be updated from IRQ context, so delta can end up negative.
	 * That is fine: a negative delta simply means an IRQ occurred very
	 * recently, so returning the average IRQ load is still correct.
	 */

	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
		return rq->avg_irqload;
	else
		return 0;
}

static inline int sched_cpu_high_irqload(int cpu)
{
	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
}

static inline int exiting_task(struct task_struct *p)
{
	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
}

static inline struct sched_cluster *cpu_cluster(int cpu)
{
	return cpu_rq(cpu)->cluster;
}

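/*
 * Linearly rescale a load sampled at src_freq into the load it would
 * represent at dst_freq: load * src_freq / dst_freq.
 */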
static inline u64
scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
{
	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
}

static inline bool is_new_task(struct task_struct *p)
{
	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
}

static inline void clear_top_tasks_table(u8 *table)
{
	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
}

extern void update_cluster_load_subtractions(struct task_struct *p,
					     int cpu, u64 ws, bool new_task);
extern void sched_account_irqstart(int cpu, struct task_struct *curr,
				   u64 wallclock);

static inline unsigned int max_task_load(void)
{
	return sched_ravg_window;
}

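/*
 * Average frequency over a sampling period: cycles executed divided by
 * the length of the period.
 */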
static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
{
	return div64_u64(cycles, period);
}

static inline unsigned int cpu_cur_freq(int cpu)
{
	return cpu_rq(cpu)->cluster->cur_freq;
}

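/*
 * Move the entries headed at @src over to @dst. With @sync_rcu set,
 * @src is reinitialised and a grace period is awaited first, so RCU
 * readers still traversing the old list can finish before its entries
 * are re-linked onto @dst.
 */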
static inline void
move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
{
	struct list_head *first, *last;

	first = src->next;
	last = src->prev;

	if (sync_rcu) {
		INIT_LIST_HEAD_RCU(src);
		synchronize_rcu();
	}

	first->prev = dst;
	dst->prev = last;
	last->next = dst;

	/* Ensure list sanity before making the head visible to all CPUs. */
	smp_mb();
	dst->next = first;
}

extern void reset_task_stats(struct task_struct *p);
extern void update_cluster_topology(void);

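/*
 * All sched_clusters live on cluster_head, an RCU-managed list. A
 * minimal traversal sketch (assuming the caller only needs a consistent
 * snapshot and is not an updater):
 *
 *	rcu_read_lock();
 *	for_each_sched_cluster(cluster)
 *		pr_debug("cluster %d cur_freq %u\n", cluster->id,
 *			 cluster->cur_freq);
 *	rcu_read_unlock();
 */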
extern struct list_head cluster_head;
#define for_each_sched_cluster(cluster) \
	list_for_each_entry_rcu(cluster, &cluster_head, list)

extern void init_clusters(void);

extern void clear_top_tasks_bitmap(unsigned long *bitmap);

extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				  u64 delta, u64 wallclock);

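/*
 * Number the clusters in list order and record each one in the global
 * sched_cluster[] lookup array.
 */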
static inline void assign_cluster_ids(struct list_head *head)
{
	struct sched_cluster *cluster;
	int pos = 0;

	list_for_each_entry(cluster, head, list) {
		cluster->id = pos;
		sched_cluster[pos++] = cluster;
	}
}

static inline int same_cluster(int src_cpu, int dst_cpu)
{
	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
}

void walt_irq_work(struct irq_work *irq_work);

void walt_sched_init(struct rq *rq);

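/*
 * Starting CPU for placement searches: begin from the task's previous
 * CPU on big.LITTLE style systems, otherwise from the lowest-power CPU.
 */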
extern int __read_mostly min_power_cpu;
static inline int walt_start_cpu(int prev_cpu)
{
	return sysctl_sched_is_big_little ? prev_cpu : min_power_cpu;
}

static inline void walt_update_last_enqueue(struct task_struct *p)
{
	p->last_enqueued_ts = sched_ktime_clock();
}
extern void walt_rotate_work_init(void);
extern void walt_rotation_checkpoint(int nr_big);
extern unsigned int walt_rotation_enabled;

#else /* CONFIG_SCHED_WALT */

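/* Stub out the WALT interfaces so callers need no #ifdef CONFIG_SCHED_WALT. */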
static inline void walt_sched_init(struct rq *rq) { }
static inline void walt_rotate_work_init(void) { }
static inline void walt_rotation_checkpoint(int nr_big) { }
static inline void walt_update_last_enqueue(struct task_struct *p) { }
static inline void walt_fixup_cumulative_runnable_avg(struct rq *rq,
						      struct task_struct *p,
						      u64 new_task_load) { }

static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
				    int event, u64 wallclock, u64 irqtime) { }
static inline void walt_inc_cumulative_runnable_avg(struct rq *rq,
						    struct task_struct *p)
{
}

static inline unsigned int nr_eligible_big_tasks(int cpu)
{
	return 0;
}

static inline void walt_adjust_nr_big_tasks(struct rq *rq,
					    int delta, bool inc)
{
}

static inline void inc_nr_big_task(struct walt_sched_stats *stats,
				   struct task_struct *p)
{
}

static inline void dec_nr_big_task(struct walt_sched_stats *stats,
				   struct task_struct *p)
{
}

static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
						    struct task_struct *p)
{
}

static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void init_new_task_load(struct task_struct *p)
{
}

static inline void mark_task_starting(struct task_struct *p) { }
static inline void set_window_start(struct rq *rq) { }
static inline int sched_cpu_high_irqload(int cpu) { return 0; }

static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
					  u64 wallclock)
{
}

static inline void update_cluster_topology(void) { }
static inline void init_clusters(void) { }
static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
					 u64 delta, u64 wallclock)
{
}

static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
static inline bool do_pl_notif(struct rq *rq) { return false; }

static inline void
inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }

static inline void
dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { }

static inline void
fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
			      u32 new_task_load, u32 new_pred_demand)
{
}

static inline int walt_start_cpu(int prev_cpu)
{
	return prev_cpu;
}

static inline u64 sched_irqload(int cpu)
{
	return 0;
}
#endif /* CONFIG_SCHED_WALT */

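/*
 * Usable with or without WALT: when CONFIG_SCHED_WALT is off,
 * sched_cpu_high_irqload() above is stubbed to return 0.
 */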
#define walt_cpu_high_irqload(cpu) sched_cpu_high_irqload(cpu)

#endif /* __WALT_H */