#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_BALLOON_MAP	= __GFP_BITS_SHIFT + 4,	/* balloon page special map */
	AS_EXITING	= __GFP_BITS_SHIFT + 5,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

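/*
 * A sketch of typical use, from a hypothetical asynchronous writeback
 * completion handler: record the error on the mapping so that a later
 * fsync() or close() can report it to userspace.
 *
 *	static void my_end_writeback(struct page *page, int error)
 *	{
 *		if (error)
 *			mapping_set_error(page->mapping, error);
 *		end_page_writeback(page);
 *	}
 */
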
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
	set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
	clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
	return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
		   (__force unsigned long)mask;
}

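/*
 * Typical use, sketched: a filesystem that must not recurse into itself
 * under memory pressure restricts its mapping's allocations at inode
 * setup time, e.g.:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 */
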
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

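/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_CACHE_SIZE == 4096):
 *
 *	PAGE_CACHE_ALIGN(5000) == 8192	(rounded up to the next page)
 *	PAGE_CACHE_ALIGN(4096) == 4096	(already page-aligned)
 */
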
/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

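/*
 * A condensed sketch of the lookup-side pattern (steps 1-3 above), modelled
 * on find_get_page() in mm/filemap.c. This hypothetical helper is
 * simplified: the real code looks up the radix-tree slot once and rechecks
 * that slot, and must also cope with exceptional (shadow) entries.
 */
static inline struct page *lockless_lookup_sketch(struct address_space *mapping,
						  pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;	/* page was (being) freed; retry */
		/* step 3: recheck the page is still at this index */
		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}
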
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

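/*
 * Remove-side sketch of steps A-C from the big comment above, loosely
 * following __remove_mapping() in mm/vmscan.c. This hypothetical helper
 * omits the dirty-page and swapcache handling of the real code; the
 * expected count of 2 is one reference held by the pagecache plus one
 * held by the caller. (__delete_from_page_cache() is declared further
 * down in this header; the declaration is repeated so the sketch reads
 * standalone.)
 */
void __delete_from_page_cache(struct page *page, void *shadow);

static inline int remove_mapping_sketch(struct address_space *mapping,
					struct page *page)
{
	spin_lock_irq(&mapping->tree_lock);
	if (!page_freeze_refs(page, 2)) {
		/* Step A failed: a speculative reference won the race. */
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	/* The refcount is frozen at 0; no new speculative get can succeed. */
	__delete_from_page_cache(page, NULL);		/* step B */
	spin_unlock_irq(&mapping->tree_lock);
	return 1;	/* step C: the caller now frees the frozen page */
}
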
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) | __GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
				 gfp_t gfp_mask);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   pgoff_t index);
extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

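/*
 * Typical use, sketched: read (or find) the page at @index, wait for it
 * to become uptodate, and drop the reference when done:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	page_cache_release(page);
 */
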
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

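/*
 * Worked example (4 KiB pages, so the final shift is zero): for a vma
 * with vm_start == 0x10000000 and vm_pgoff == 16, an address of
 * 0x10003000 yields (0x10003000 - 0x10000000) >> 12 == 3, plus 16,
 * i.e. file page offset 19.
 */
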
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

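/*
 * Typical use, sketched: callers that can return an error would rather
 * take -EINTR than block uninterruptibly on a wedged page:
 *
 *	error = lock_page_killable(page);
 *	if (error)
 *		return error;	(a fatal signal arrived: -EINTR)
 *	...
 *	unlock_page(page);
 */
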
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

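/*
 * A common pattern before modifying a pagecache page in place, sketched:
 * the lock keeps the page's state stable, and waiting for writeback
 * avoids scribbling on data that is still being written to disk (see
 * also wait_for_stable_page() for backing devices that require stable
 * pages during writeback):
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	... modify page contents ...
 *	set_page_dirty(page);
 *	unlock_page(page);
 */
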
void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
		    ((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
		    ((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

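/*
 * Typical use, sketched from a generic write path: fault the user buffer
 * in before taking page locks, so that a later atomic (non-faulting)
 * copy under the page lock is likely to succeed. Copying with faults
 * enabled while holding the page lock could deadlock if the source
 * buffer is mapped from the very page being written:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	... lock pagecache page, copy atomically, unlock ...
 */
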
/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted.  These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

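/*
 * Sketch of the common allocate-and-insert pattern (compare the readahead
 * code in mm/readahead.c). This hypothetical helper leaves the new page
 * locked and on the LRU on success; on failure it drops its only reference:
 */
static inline int add_new_page_sketch(struct address_space *mapping,
				      pgoff_t index)
{
	struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
	int error;

	if (!page)
		return -ENOMEM;
	error = add_to_page_cache_lru(page, mapping, index,
				      mapping_gfp_mask(mapping));
	if (error)
		page_cache_release(page);
	return error;
}
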
#endif /* _LINUX_PAGEMAP_H */