#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>	/* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
#define AS_MM_ALL_LOCKS	(__GFP_BITS_SHIFT + 2)	/* under mm_take_all_locks() */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
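
/*
 * Illustrative sketch (not part of this header): a filesystem's
 * asynchronous writepage path typically records failures like this so
 * that a later fsync()/msync() on the file can report them.  The
 * example_do_writeout() helper below is hypothetical.
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		int err = example_do_writeout(page, wbc);
 *
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		return err;
 *	}
 */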

#ifdef CONFIG_UNEVICTABLE_LRU
#define AS_UNEVICTABLE	(__GFP_BITS_SHIFT + 3)	/* e.g., ramdisk, SHM_LOCK */

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping && test_bit(AS_UNEVICTABLE, &mapping->flags))
                return 1;
        return 0;
}
#else
static inline void mapping_set_unevictable(struct address_space *mapping) { }
static inline int mapping_unevictable(struct address_space *mapping)
{
        return 0;
}
#endif

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
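
/*
 * Illustrative sketch (assumed usage, not taken from this file): the
 * allocation mask is normally set once while an inode's mapping is being
 * set up, before any pages are inserted, e.g.:
 *
 *	inode->i_mapping->a_ops = &example_aops;
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * 'example_aops' is hypothetical; GFP_NOFS keeps later page-cache
 * allocations for this mapping from recursing into the filesystem.
 */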

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned.  Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable.  page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache.  That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not.  Likewise, the old find_get_page could
 * run either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON(page_count(page) == 0);
        atomic_inc(&page->_count);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON(PageTail(page));

        return 1;
}
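
/*
 * Illustrative sketch of the lookup-side pattern described above; this is
 * a simplified find_get_page() (the real one lives in mm/filemap.c and
 * additionally handles radix-tree slot deref retries):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	    step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		    step 2
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			page_cache_release(page);		    step 3 failed
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */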

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON(page_count(page) == 0);
        atomic_add(count, &page->_count);

#else
        if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                return 0;
#endif
        VM_BUG_ON(PageCompound(page) && page != compound_head(page));

        return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
        return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
        VM_BUG_ON(page_count(page) != 0);
        VM_BUG_ON(count == 0);

        atomic_set(&page->_count, count);
}
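
/*
 * Illustrative sketch of the remove side (steps A-C in the comment above),
 * loosely based on reclaim's __remove_mapping() in mm/vmscan.c; simplified,
 * with the swap-cache case omitted.  The caller holds the page lock and
 * expects a count of 2 (one pagecache reference plus its own):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;		raced with a new reference
 *	if (unlikely(PageDirty(page))) {
 *		page_unfreeze_refs(page, 2);	give the references back
 *		goto cannot_free;
 *	}
 *	__remove_from_page_cache(page);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	(the refcount is left at zero and the page can now be freed)
 */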

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x) | __GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
                                  pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
                                   pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                            int tag, unsigned int nr_pages,
                            struct page **pages);

struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                           pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
                                           pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
                                          pgoff_t index, filler_t *filler,
                                          void *data);
extern struct page *read_cache_page(struct address_space *mapping,
                                    pgoff_t index, filler_t *filler,
                                    void *data);
extern int read_cache_pages(struct address_space *mapping,
                            struct list_head *pages, filler_t *filler,
                            void *data);

static inline struct page *read_mapping_page_async(struct address_space *mapping,
                                                   pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
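
/*
 * Illustrative sketch (assumed caller, not taken from this file): a
 * filesystem reading one of its own pages through the page cache.  The
 * page comes back uptodate (or as an ERR_PTR on failure) and unlocked:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	kaddr = kmap(page);
 *	(use the data at kaddr)
 *	kunmap(page);
 *	page_cache_release(page);
 */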

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
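
/*
 * Worked example (illustrative, assuming 4K pages so PAGE_SHIFT == 12 and
 * PAGE_CACHE_SHIFT == PAGE_SHIFT): for a vma with vm_start == 0x40000000
 * that maps the file starting at offset 1MB (vm_pgoff == 256), the address
 * 0x40003000 lies 3 pages into the vma, so linear_page_index() returns
 * 256 + 3 = 259.
 */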

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void set_page_locked(struct page *page)
{
        set_bit(PG_locked, &page->flags);
}

static inline void clear_page_locked(struct page *page)
{
        clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
        return !test_and_set_bit(PG_locked, &page->flags);
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
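
/*
 * Illustrative sketch (assumed caller): users of lock_page_killable() must
 * be prepared to back out when a fatal signal arrives, e.g. in a read path:
 *
 *	error = lock_page_killable(page);
 *	if (unlikely(error)) {
 *		page_cache_release(page);
 *		return error;			-EINTR: task is being killed
 *	}
 *	(the page is now locked; do the work, then unlock_page(page))
 */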

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into the pagetables.  Returns non-zero if the
 * page could not be faulted in.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        if (unlikely(size == 0))
                return 0;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __get_user(c, end);
        }
        return ret;
}
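
/*
 * Illustrative sketch of why these helpers exist (simplified from the
 * generic buffered write path in mm/filemap.c): the source buffer of a
 * write() is faulted in readable *before* the destination pagecache page
 * is locked, so that the later atomic, pagefault-disabled copy is expected
 * to succeed:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 *				    &page, &fsdata);		locks the page
 *	pagefault_disable();
 *	kaddr = kmap_atomic(page, KM_USER0);
 *	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *	kunmap_atomic(kaddr, KM_USER0);
 *	pagefault_enable();
 *	status = a_ops->write_end(file, mapping, pos, bytes, bytes - left,
 *				  page, fsdata);		unlocks the page
 */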

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        set_page_locked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                clear_page_locked(page);
        return error;
}
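
/*
 * Illustrative sketch (modelled on the readahead code in mm/readahead.c,
 * simplified): adding a freshly allocated page and starting the read.
 * The page is returned locked by add_to_page_cache_lru(); ->readpage()
 * unlocks it when the I/O completes, and the pagecache holds its own
 * reference, so the local reference can be dropped right away:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (page) {
 *		if (!add_to_page_cache_lru(page, mapping, index, GFP_KERNEL))
 *			mapping->a_ops->readpage(file, page);
 *		page_cache_release(page);
 *	}
 */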

#endif /* _LINUX_PAGEMAP_H */