/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
| 5 | #ifndef _UAPI_LINUX_MEMPOLICY_H |
| 6 | #define _UAPI_LINUX_MEMPOLICY_H |
| 7 | |
| 8 | #include <linux/errno.h> |
| 9 | |
| 10 | |
/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
 * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
 */
| 16 | |
/*
 * Policies: memory allocation policy modes accepted by set_mempolicy()
 * and mbind().  The numeric values are userspace ABI and must never
 * change, so they are spelled out explicitly here.
 */
enum {
	MPOL_DEFAULT	= 0,
	MPOL_PREFERRED	= 1,
	MPOL_BIND	= 2,
	MPOL_INTERLEAVE	= 3,
	MPOL_LOCAL	= 4,
	MPOL_MAX	= 5,	/* always last member of enum */
};
| 26 | |
/*
 * mpol_rebind_step: which phase of a (possibly two-step) nodemask
 * rebind is being performed.  Values are explicit because they are
 * part of the kernel/userspace-visible header.
 */
enum mpol_rebind_step {
	MPOL_REBIND_ONCE  = 0,	/* do the whole rebind in one pass (not two steps) */
	MPOL_REBIND_STEP1 = 1,	/* first step: set all the newly allowed nodes */
	MPOL_REBIND_STEP2 = 2,	/* second step: clear all the disallowed nodes */
	MPOL_REBIND_NSTEP = 3,
};
| 33 | |
/*
 * Flags for set_mempolicy: optional mode flags OR'ed into the 'int' mode
 * argument.  They occupy the high bits so they cannot collide with the
 * low-valued MPOL_* mode constants above.
 */
#define MPOL_F_STATIC_NODES (1 << 15)
#define MPOL_F_RELATIVE_NODES (1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
| 43 | |
/* Flags for get_mempolicy: select what the call reports back */
#define MPOL_F_NODE (1<<0) /* return next IL (interleave) mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
| 48 | |
/* Flags for mbind: passed in mbind()'s separate 'flags' argument */
#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform
to policy */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to policy */
#define MPOL_MF_LAZY (1<<3) /* Modifies '_MOVE: lazy migrate on fault */
#define MPOL_MF_INTERNAL (1<<4) /* Internal flags start here */

/*
 * Union of the mbind() flags above that form the valid set.
 * NOTE(review): MPOL_MF_LAZY is not included here — presumably it is
 * rejected when passed from userspace; confirm against mm/mempolicy.c.
 */
#define MPOL_MF_VALID (MPOL_MF_STRICT | \
MPOL_MF_MOVE | \
MPOL_MF_MOVE_ALL)
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 60 | |
/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags". These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 * (The API-visible MPOL_F_* mode flags above grow down from bit 15,
 * so the two sets cannot collide in the shared word.)
 */
#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON (1 << 4) /* Migrate On pte_numa Reference On Node */
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 71 | |
| 72 | |
| 73 | #endif /* _UAPI_LINUX_MEMPOLICY_H */ |