#ifndef _LINUX_SCHED_RT_H
#define _LINUX_SCHED_RT_H

#include <linux/sched.h>

struct task_struct;

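/*
 * rt_prio - is @prio in the real-time priority range?
 *
 * Returns 1 if @prio lies below MAX_RT_PRIO, 0 otherwise.
 */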
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}
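
/*
 * rt_task - does @p currently run at a real-time priority?
 *
 * Checks ->prio rather than ->normal_prio, so tasks temporarily boosted
 * by priority inheritance are covered as well.
 */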
static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}
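
/*
 * Priority-inheritance interface between the rt_mutex code and the
 * scheduler core, available when CONFIG_RT_MUTEXES is enabled.
 */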
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
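/* True while @tsk is blocked on an rt_mutex, i.e. ->pi_blocked_on is set. */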
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return tsk->pi_blocked_on != NULL;
}
#else
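/*
 * Without rt_mutexes there is no priority inheritance: priorities are
 * never boosted and tasks are never PI-blocked, so these collapse to
 * trivial stubs.
 */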
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}

static inline int rt_mutex_get_effective_prio(struct task_struct *task,
					      int newprio)
{
	return newprio;
}

static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	return NULL;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return false;
}
#endif
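
/* Reset all real-time tasks in the system back to a normal (non-RT) policy. */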
extern void normalize_rt_tasks(void);

/*
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define RR_TIMESLICE		(100 * HZ / 1000)
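/* 100 * HZ / 1000 is HZ / 10 scheduler ticks, i.e. 100 ms expressed in jiffies. */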

#endif /* _LINUX_SCHED_RT_H */