#ifndef _SCHED_RT_H
#define _SCHED_RT_H

#include <linux/sched/prio.h>

/*
 * rt_prio() - returns 1 if @prio lies in the realtime priority range,
 * i.e. below MAX_RT_PRIO (lower numeric values mean higher priority).
 */
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

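/*
 * Illustrative sketch only; the numeric values are an assumption based on
 * the mainline priority layout, where MAX_RT_PRIO is 100:
 *
 *	rt_prio(0);	// -> 1, highest realtime priority
 *	rt_prio(99);	// -> 1, lowest realtime priority
 *	rt_prio(120);	// -> 0, a SCHED_NORMAL priority
 */
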
/* rt_task() - true if @p currently runs at a realtime priority. */
static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

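/*
 * Hypothetical caller, sketched for illustration only (not part of this
 * header):
 *
 *	if (rt_task(current))
 *		return;	// e.g. skip throttling for realtime tasks
 */
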
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
/* True if @tsk is currently blocked on an rtmutex. */
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return tsk->pi_blocked_on != NULL;
}
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}

static inline int rt_mutex_get_effective_prio(struct task_struct *task,
					      int newprio)
{
	return newprio;
}

static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	return NULL;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return false;
}
#endif
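
/*
 * Illustrative sketch of the priority-inheritance contract above; the
 * numbers are assumptions for the example, not values defined in this
 * header. With CONFIG_RT_MUTEXES, a lock owner is boosted to its
 * highest-priority waiter (lower number == higher priority):
 *
 *	p->normal_prio == 120			// SCHED_NORMAL owner
 *	top waiter prio == 10			// realtime waiter
 *	rt_mutex_getprio(p)			-> 10
 *	rt_mutex_get_effective_prio(p, 120)	-> 10
 *
 * Without CONFIG_RT_MUTEXES the stubs above never boost:
 * rt_mutex_getprio(p) is just p->normal_prio and
 * rt_mutex_get_effective_prio() just returns @newprio.
 */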

/* Used by the SysRq 'n' handler: reset all realtime tasks to SCHED_NORMAL. */
extern void normalize_rt_tasks(void);


/*
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define RR_TIMESLICE		(100 * HZ / 1000)

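/*
 * Example: RR_TIMESLICE is in jiffies, so its value tracks the configured
 * tick rate (the values below are just the arithmetic for common HZ
 * choices):
 *
 *	HZ=1000: RR_TIMESLICE == 100	// 100 ms
 *	HZ= 250: RR_TIMESLICE ==  25	// 100 ms
 *	HZ= 100: RR_TIMESLICE ==  10	// 100 ms
 */
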
#endif /* _SCHED_RT_H */