#ifndef _SCHED_SYSCTL_H
#define _SCHED_SYSCTL_H

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
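
/*
 * Illustrative sketch (not part of the kernel API): because the #else
 * branch above defines sysctl_hung_task_timeout_secs as an enum constant
 * of value zero, callers can test it without any #ifdefs and let the
 * compiler discard the dead branch when CONFIG_DETECT_HUNG_TASK is off,
 * e.g.:
 *
 *	if (sysctl_hung_task_timeout_secs)
 *		check_for_hung_tasks();
 *
 * check_for_hung_tasks() is a hypothetical caller, used here only to
 * show the idiom.
 */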

/*
 * Default maximum number of active map areas; this limits the number of
 * vmas per mm struct. Users can override this number via sysctl, but
 * there is a caveat.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per vma. In ELF, the number of sections is stored in an
 * unsigned short, so it must stay below 65535 at coredump time. Because
 * the kernel adds a few informative sections to the program image when
 * generating the coredump, we need some margin. The number of extra
 * sections is currently 1-3, depending on the architecture; we use 5 as
 * a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
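
/*
 * Worked example (assuming the usual 16-bit unsigned short, so
 * USHRT_MAX == 65535): DEFAULT_MAX_MAP_COUNT == 65535 - 5 == 65530,
 * which leaves room for the extra sections the kernel appends to an
 * ELF coredump.
 */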

extern int sysctl_max_map_count;

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_period_reset;
extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_numa_balancing_settle_count;

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;

int sched_proc_update_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *length,
			      loff_t *ppos);

static inline unsigned int get_sysctl_timer_migration(void)
{
	return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
#endif

/*
 * Control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
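
/*
 * Illustrative usage (a sketch of the tunables above, not a definitive
 * statement of the defaults): sched_rt_runtime_us bounds how much CPU
 * time realtime tasks may consume within each sched_rt_period_us
 * window, e.g.
 *
 *	echo 1000000 > /proc/sys/kernel/sched_rt_period_us
 *	echo  950000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * allows realtime tasks at most 0.95s of CPU per 1s period, while a
 * runtime of -1 disables the throttling entirely.
 */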

#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif

extern int sched_rr_timeslice;

extern int sched_rr_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos);

extern int sched_rt_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos);

#endif /* _SCHED_SYSCTL_H */