aio: async page waiting
[linux-block.git] / include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>		/* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}

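/*
 * Illustrative sketch (not part of this header): the lookup side described
 * above, roughly in the style of find_get_page() in mm/filemap.c.  The real
 * code works on radix tree slots and uses radix_tree_deref_slot(); the
 * recheck below is simplified to a second lookup for the example:
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	// step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// step 2
 *			goto repeat;	// page was being freed, look again
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			page_cache_release(page);		// step 3 failed
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 *	return page;
 */
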
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

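/*
 * Illustrative sketch (not part of this header): the remove side ("A"-"C")
 * described above page_cache_get_speculative(), in the spirit of
 * __remove_mapping() in mm/vmscan.c.  It assumes the caller holds
 * mapping->tree_lock and its own reference, so the expected count is 2
 * (caller + pagecache); the real code also handles dirty pages, swap
 * cache, etc.:
 *
 *	if (!page_freeze_refs(page, 2))
 *		return 0;		// "A" failed: a lookup got in first
 *	if (unlikely(PageDirty(page))) {
 *		page_unfreeze_refs(page, 2);	// put the references back
 *		return 0;
 *	}
 *	__remove_from_page_cache(page);	// "B": unhook from the radix tree
 *	return 1;			// "C" (freeing) is up to the caller
 */
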
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

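/*
 * Illustrative sketch (not part of this header): a typical filesystem use of
 * read_mapping_page() to read page @n of an inode into the page cache.  The
 * names are made up for the example; passing NULL as @data works when the
 * filesystem's ->readpage() does not need a struct file:
 *
 *	struct page *page;
 *
 *	page = read_mapping_page(inode->i_mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// page is uptodate here, with an extra reference, but not locked
 *	...
 *	page_cache_release(page);
 */
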
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

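/*
 * Worked example (assuming PAGE_CACHE_SHIFT == PAGE_SHIFT == 12, i.e. 4KB
 * pages): for a vma with vm_start == 0x40000000 and vm_pgoff == 16 (the
 * mapping starts at file offset 64KB), address 0x40003000 gives
 * pgoff = (0x3000 >> 12) + 16 = 19, i.e. the page caching file offset 76KB.
 */
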
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern int __lock_page_wq(struct page *page, struct wait_bit_queue *);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

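/*
 * Illustrative sketch (not part of this header): how a read path might use
 * lock_page_killable(), roughly as in the generic file read path; names and
 * error handling are simplified:
 *
 *	error = lock_page_killable(page);
 *	if (unlikely(error)) {
 *		page_cache_release(page);
 *		return error;		// -EINTR: a fatal signal is pending
 *	}
 *	...				// the page is locked here
 *	unlock_page(page);
 */
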
/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

static inline int lock_page_wq(struct page *page, struct wait_bit_queue *wq)
{
	if (!trylock_page(page))
		return __lock_page_wq(page, wq);

	return 0;
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

extern int wait_on_page_bit_wq(struct page *, int, struct wait_bit_queue *);

static inline int wait_on_page_locked_wq(struct page *page,
					 struct wait_bit_queue *wait)
{
	if (PageLocked(page))
		return wait_on_page_bit_wq(page, PG_locked, wait);

	return 0;
}

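/*
 * Illustrative sketch (not part of this header): the *_wq variants above
 * take a wait_bit_queue so an AIO submission path can avoid sleeping.  The
 * assumption here (see __lock_page_wq()/wait_on_page_bit_wq() in
 * mm/filemap.c for the exact convention) is that when the bit is set, the
 * supplied entry is queued and a nonzero value (e.g. -EIOCBRETRY) is
 * returned so the request can be retried once the page becomes available.
 * "wq" below stands for whatever wait_bit_queue the caller owns:
 *
 *	ret = lock_page_wq(page, wq);
 *	if (ret)
 *		return ret;	// waiter queued; retry later, don't block
 *	...			// got PG_locked without sleeping
 *	unlock_page(page);
 */
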
/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}

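/*
 * Illustrative sketch (not part of this header): the classic buffered-write
 * use of fault_in_pages_readable(), roughly as in generic_perform_write()
 * in mm/filemap.c (which calls it via iov_iter_fault_in_readable()).  The
 * user buffer is faulted in before the destination page is locked, so that
 * the atomic copy below is unlikely to fault while the lock is held:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 *					&page, &fsdata);
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *					page, fsdata);
 */
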
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

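/*
 * Illustrative sketch (not part of this header): adding a freshly allocated
 * page and kicking off a read, roughly as the readahead path does in
 * mm/readahead.c.  On success the page is in the cache and locked; the
 * filesystem's ->readpage() is expected to unlock it when the read completes:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 *		page_cache_release(page);	// lost a race, page exists
 *		return 0;
 *	}
 *	error = mapping->a_ops->readpage(file, page);
 *	page_cache_release(page);		// drop our reference
 */
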
#endif /* _LINUX_PAGEMAP_H */