/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_NUMA_BALANCING_H
#define _LINUX_SCHED_NUMA_BALANCING_H

/*
 * This is the interface between the scheduler and the MM that
 * implements memory access pattern based NUMA-balancing:
 */

#include <linux/sched.h>

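/*
 * Flags for the @flags argument of task_numa_fault(), describing the
 * NUMA hinting fault being accounted:
 */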
#define TNF_MIGRATED		0x01
#define TNF_NO_GROUP		0x02
#define TNF_SHARED		0x04
#define TNF_FAULT_LOCAL		0x08
#define TNF_MIGRATE_FAIL	0x10

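/*
 * Rough sketch of how the MM fault path is expected to drive this
 * interface on a NUMA hinting fault; the variables used here (page,
 * vma, page_nid, target_nid, last_node) are illustrative only:
 *
 *	int flags = 0;
 *
 *	if (page_nid == numa_node_id())
 *		flags |= TNF_FAULT_LOCAL;
 *
 *	if (migrate_misplaced_page(page, vma, target_nid)) {
 *		page_nid = target_nid;
 *		flags |= TNF_MIGRATED;
 *	} else
 *		flags |= TNF_MIGRATE_FAIL;
 *
 *	task_numa_fault(last_node, page_nid, 1, flags);
 */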
#ifdef CONFIG_NUMA_BALANCING
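/* The real implementations live on the scheduler side (kernel/sched/): */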
19extern void task_numa_fault(int last_node, int node, int pages, int flags);
20extern pid_t task_numa_group_id(struct task_struct *p);
21extern void set_numabalancing_state(bool enabled);
22extern void task_numa_free(struct task_struct *p);
23extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
24 int src_nid, int dst_cpu);
25#else
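/* No-op stubs so callers build unchanged when NUMA balancing is compiled out: */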
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

#endif /* _LINUX_SCHED_NUMA_BALANCING_H */