/* blob: 68e69acc29b9570b10ecf8a5892ea62c48700c5c */
Peter Zijlstrae26af0e2009-09-11 12:31:23 +02001/*
Ingo Molnar51e03042009-09-16 08:54:45 +02002 * Only give sleepers 50% of their service deficit. This allows
3 * them to run sooner, but does not allow tons of sleepers to
4 * rip the spread apart.
5 */
6SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
Peter Zijlstrae26af0e2009-09-11 12:31:23 +02007
8/*
Peter Zijlstrae26af0e2009-09-11 12:31:23 +02009 * Place new tasks ahead so that they do not starve already running
10 * tasks
11 */
Peter Zijlstraf00b45c2008-04-19 19:45:00 +020012SCHED_FEAT(START_DEBIT, 1)
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020013
14/*
15 * Should wakeups try to preempt running tasks.
16 */
17SCHED_FEAT(WAKEUP_PREEMPT, 1)
18
19/*
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020020 * Based on load and program behaviour, see if it makes sense to place
21 * a newly woken task on the same cpu as the task that woke it --
22 * improve cache locality. Typically used with SYNC wakeups as
23 * generated by pipes and the like, see also SYNC_WAKEUPS.
24 */
25SCHED_FEAT(AFFINE_WAKEUPS, 1)
26
27/*
28 * Prefer to schedule the task we woke last (assuming it failed
29 * wakeup-preemption), since its likely going to consume data we
30 * touched, increases cache locality.
31 */
Mike Galbraith0ec9fab2009-09-15 15:07:03 +020032SCHED_FEAT(NEXT_BUDDY, 0)
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020033
34/*
35 * Prefer to schedule the task that ran last (when we did
36 * wake-preempt) as that likely will touch the same data, increases
37 * cache locality.
38 */
39SCHED_FEAT(LAST_BUDDY, 1)
40
41/*
42 * Consider buddies to be cache hot, decreases the likelyness of a
43 * cache buddy being migrated away, increases cache locality.
44 */
45SCHED_FEAT(CACHE_HOT_BUDDY, 1)
46
Peter Zijlstra8e6598a2009-09-03 13:20:03 +020047/*
48 * Use arch dependent cpu power functions
49 */
50SCHED_FEAT(ARCH_POWER, 0)
51
Ingo Molnar0c4b83d2008-10-20 14:27:43 +020052SCHED_FEAT(HRTICK, 0)
Peter Zijlstraf00b45c2008-04-19 19:45:00 +020053SCHED_FEAT(DOUBLE_TICK, 0)
Peter Zijlstraefc2dea2008-08-20 12:44:55 +020054SCHED_FEAT(LB_BIAS, 1)
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020055
56/*
57 * Spin-wait on mutex acquisition when the mutex owner is running on
58 * another cpu -- assumes that when the owner is running, it will soon
59 * release the lock. Decreases scheduling overhead.
60 */
Peter Zijlstra0d66bf62009-01-12 14:01:47 +010061SCHED_FEAT(OWNER_SPIN, 1)
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -070062
63/*
64 * Decrement CPU power based on irq activity
65 */
66SCHED_FEAT(NONIRQ_POWER, 1)