#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE

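/*
 * Illustrative sketch, not part of this header: from user space the
 * policy modes above are installed with the set_mempolicy() system
 * call. A process might interleave its future allocations across
 * nodes 0 and 1 like this (the node mask is a bit mask indexed by
 * node number; maxnode counts bits):
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  sizeof(nodemask) * 8) < 0)
 *		perror("set_mempolicy");
 */
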
/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

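/*
 * Illustrative sketch, not part of this header: user space reads a
 * policy back with the get_mempolicy() system call. With MPOL_F_ADDR
 * the policy of the VMA covering 'addr' (a hypothetical pointer into
 * some mapping) is returned instead of the process policy:
 *
 *	int mode;
 *	if (get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR) == 0)
 *		printf("policy at %p: %d\n", addr, mode);
 */
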
/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */

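/*
 * Illustrative sketch, not part of this header: these flags modify the
 * mbind() system call. MPOL_MF_MOVE not only sets the policy for a
 * range but also migrates the pages already present so they conform,
 * e.g. binding an existing mapping ('addr'/'len' hypothetical) to
 * node 1:
 *
 *	unsigned long nodemask = 1UL << 1;
 *	if (mbind(addr, len, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_MOVE) < 0)
 *		perror("mbind");
 */
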
#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;
struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;			/* See MPOL_* above */
	union {
		struct zonelist *zonelist;	/* bind */
		short preferred_node;		/* preferred */
		nodemask_t nodes;		/* interleave */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

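/*
 * Illustrative sketch, not part of this header: fork-time code in the
 * style of dup_mmap() duplicates a VMA policy for the child, since an
 * MPOL_BIND policy owns external state (the kmalloc'ed zonelist) that
 * must not be shared by reference; __mpol_copy() reports allocation
 * failure via ERR_PTR():
 *
 *	struct mempolicy *pol = mpol_copy(vma_policy(old_vma));
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	vma_set_policy(new_vma, pol);
 */
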
#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

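/*
 * Illustrative sketch, not part of this header: a shmem-style caller
 * (names hypothetical) looks a policy up by page index rather than
 * byte offset, matching the page-indexed pseudo mm described above:
 *
 *	unsigned long idx = ((addr - vma->vm_start) >> PAGE_SHIFT)
 *				+ vma->vm_pgoff;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 */
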
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSETS
#define current_cpuset_is_being_rebound() \
		(cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

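/*
 * Illustrative sketch, not part of this header: do_migrate_pages()
 * backs the migrate_pages() system call, which moves the pages of
 * task 'pid' from one node set to another, e.g. from node 0 to
 * node 1:
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	if (migrate_pages(pid, sizeof(from) * 8, &from, &to) < 0)
 *		perror("migrate_pages");
 */
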
extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
					int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(enum zone_type k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif