Clark Williams | cf4aebc2 | 2013-02-07 09:46:59 -0600 | [diff] [blame] | 1 | #ifndef _SCHED_SYSCTL_H |
| 2 | #define _SCHED_SYSCTL_H |
| 3 | |
| 4 | #ifdef CONFIG_DETECT_HUNG_TASK |
Li Zefan | cd64647 | 2013-09-23 16:43:58 +0800 | [diff] [blame] | 5 | extern int sysctl_hung_task_check_count; |
Clark Williams | cf4aebc2 | 2013-02-07 09:46:59 -0600 | [diff] [blame] | 6 | extern unsigned int sysctl_hung_task_panic; |
Clark Williams | cf4aebc2 | 2013-02-07 09:46:59 -0600 | [diff] [blame] | 7 | extern unsigned long sysctl_hung_task_timeout_secs; |
Aaron Tomlin | 270750db | 2014-01-20 17:34:13 +0000 | [diff] [blame] | 8 | extern int sysctl_hung_task_warnings; |
Clark Williams | cf4aebc2 | 2013-02-07 09:46:59 -0600 | [diff] [blame] | 9 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, |
| 10 | void __user *buffer, |
| 11 | size_t *lenp, loff_t *ppos); |
| 12 | #else |
| 13 | /* Avoid need for ifdefs elsewhere in the code */ |
| 14 | enum { sysctl_hung_task_timeout_secs = 0 }; |
| 15 | #endif |
| 16 | |
/* Core scheduler tunables. */
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_cstate_aware;
#ifdef CONFIG_SCHED_WALT
/* Tunables gated on CONFIG_SCHED_WALT (window-assisted load tracking). */
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_walt_cpu_high_irqload;
#endif

/*
 * Scaling mode selected via sysctl_sched_tunable_scaling;
 * SCHED_TUNABLESCALING_END is a sentinel (number of valid modes).
 */
enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

/* NUMA balancing tunables. */
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;

#ifdef CONFIG_SCHED_DEBUG
/* Extra tunables exposed only with CONFIG_SCHED_DEBUG. */
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
extern __read_mostly unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_sched_shares_window;

/* proc handler shared by the debug tunables above. */
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);
#endif

/*
 * control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif

#ifdef CONFIG_SCHED_TUNE
extern unsigned int sysctl_sched_cfs_boost;
/* proc handler invoked when the boost sysctl is written. */
int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
				   void __user *buffer, size_t *length,
				   loff_t *ppos);
/* Current CFS boost value as set via sysctl. */
static inline unsigned int get_sysctl_sched_cfs_boost(void)
{
	return sysctl_sched_cfs_boost;
}
#else
/* Without SchedTune the boost is a constant 0, so callers need no ifdefs. */
static inline unsigned int get_sysctl_sched_cfs_boost(void)
{
	return 0;
}
#endif

Clark Williams | cf4aebc2 | 2013-02-07 09:46:59 -0600 | [diff] [blame] | 83 | #ifdef CONFIG_SCHED_AUTOGROUP |
| 84 | extern unsigned int sysctl_sched_autogroup_enabled; |
| 85 | #endif |
| 86 | |
Clark Williams | ce0dbbb | 2013-02-07 09:47:04 -0600 | [diff] [blame] | 87 | extern int sched_rr_timeslice; |
| 88 | |
| 89 | extern int sched_rr_handler(struct ctl_table *table, int write, |
| 90 | void __user *buffer, size_t *lenp, |
| 91 | loff_t *ppos); |
| 92 | |
| 93 | extern int sched_rt_handler(struct ctl_table *table, int write, |
Clark Williams | cf4aebc2 | 2013-02-07 09:46:59 -0600 | [diff] [blame] | 94 | void __user *buffer, size_t *lenp, |
| 95 | loff_t *ppos); |
| 96 | |
Andi Kleen | 54a43d5 | 2014-01-23 15:53:13 -0800 | [diff] [blame] | 97 | extern int sysctl_numa_balancing(struct ctl_table *table, int write, |
| 98 | void __user *buffer, size_t *lenp, |
| 99 | loff_t *ppos); |
| 100 | |
Mel Gorman | cb25176 | 2016-02-05 09:08:36 +0000 | [diff] [blame] | 101 | extern int sysctl_schedstats(struct ctl_table *table, int write, |
| 102 | void __user *buffer, size_t *lenp, |
| 103 | loff_t *ppos); |
| 104 | |
Clark Williams | cf4aebc2 | 2013-02-07 09:46:59 -0600 | [diff] [blame] | 105 | #endif /* _SCHED_SYSCTL_H */ |