blob: 1c1a1512ec553fdbda5e7651d349737495411e2c [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SYSCTL_H
#define _LINUX_SCHED_SYSCTL_H

/*
 * Declarations of scheduler-related sysctl tunables and their proc
 * handlers.  The variables themselves are defined in kernel/sched/
 * and kernel/hung_task.c; this header only exposes them to the
 * sysctl table machinery and other interested code.
 */

#include <linux/types.h>

struct ctl_table;

#ifdef CONFIG_DETECT_HUNG_TASK
/* Tunables for the hung-task detector (CONFIG_DETECT_HUNG_TASK). */
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern int sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* CFS scheduler tunables (granularity values; presumably in ns — see definitions). */
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

/*
 * How the above granularity tunables are scaled with the number of
 * CPUs: not at all, logarithmically, or linearly.
 */
enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,	/* number of valid modes, not a mode itself */
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

/* NUMA-balancing scan-rate tunables. */
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;

#ifdef CONFIG_SCHED_DEBUG
/* Extra knobs only exposed when the scheduler debug interface is built in. */
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
extern __read_mostly unsigned int sysctl_sched_time_avg;

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);
#endif

/*
 * control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif

/*
 * Two views of the SCHED_RR timeslice: sysctl_sched_rr_timeslice is the
 * user-visible sysctl value, sched_rr_timeslice the internal one kept in
 * sync by sched_rr_handler() — NOTE(review): units likely differ (ms vs
 * jiffies); confirm against the definitions in kernel/sched/.
 */
extern int sysctl_sched_rr_timeslice;
extern int sched_rr_timeslice;

/* proc handlers that validate and propagate writes to the knobs above. */
extern int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern int sysctl_numa_balancing(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
			 loff_t *ppos);

extern int sysctl_schedstats(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
			 loff_t *ppos);

#endif /* _LINUX_SCHED_SYSCTL_H */