#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

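/*
 * Example (illustrative sketch, not an existing kernel function): a
 * writeback completion path would typically record a failure like this,
 * so that a later filemap_fdatawait()/fsync() can report it:
 *
 *	static void example_end_write(struct page *page, int err)
 *	{
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 */
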
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

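/*
 * Example (sketch): a filesystem that must not recurse into filesystem
 * code while allocating page-cache pages can clear __GFP_FS on an inode's
 * mapping at setup time.  example_setup_inode is a hypothetical name.
 *
 *	static void example_setup_inode(struct inode *inode)
 *	{
 *		gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
 *
 *		mapping_set_gfp_mask(inode->i_mapping, gfp & ~__GFP_FS);
 *	}
 */
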
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

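/*
 * Example (simplified sketch of the lookup-side pattern described above,
 * modelled on find_get_page()'s implementation; the real code in
 * mm/filemap.c uses radix-tree slot dereferencing and also handles
 * exceptional shadow entries):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		// recheck: the page may have been freed and reused
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */
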
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

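/*
 * Example (simplified sketch of the remove-side pattern, modelled on
 * reclaim's __remove_mapping()): with tree_lock held for write, freeze the
 * expected reference count (here 2: the page cache plus the isolating
 * caller) before tearing the page out of the radix tree:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2)) {
 *		// raced with a speculative get; keep the page
 *		spin_unlock_irq(&mapping->tree_lock);
 *		return 0;
 *	}
 *	__delete_from_page_cache(page, NULL);
 *	spin_unlock_irq(&mapping->tree_lock);
 */
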
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

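/*
 * Example (sketch): the usual lookup/use/release pattern.  A successful
 * lookup returns the page with one extra reference that the caller must
 * drop.
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		// ... use the page ...
 *		page_cache_release(page);
 *	}
 */
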
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
				struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

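/*
 * Example (sketch): read_mapping_page() returns an ERR_PTR() on failure,
 * never NULL, so callers must check with IS_ERR():
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... the page is up to date; read its contents ...
 *	page_cache_release(page);
 */
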
/*
 * Get the offset in PAGE_SIZE units.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);
	else
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

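/*
 * Example (sketch): a fault handler turns the faulting user address into
 * the file-relative page offset used for the pagecache lookup:
 *
 *	pgoff_t pgoff = linear_page_index(vma, address);
 *	struct page *page = find_get_page(vma->vm_file->f_mapping, pgoff);
 */
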
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

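/*
 * Example (sketch): a read path that should stay killable while waiting
 * for the page lock:
 *
 *	if (lock_page_killable(page)) {
 *		// killed while waiting; drop our reference and bail out
 *		page_cache_release(page);
 *		return -EINTR;
 *	}
 *	// ... page is locked; check PageUptodate(page), etc. ...
 *	unlock_page(page);
 */
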
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

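/*
 * Example (simplified sketch of the buffered-write pattern that motivates
 * these helpers): prefault the source buffer before locking the pagecache
 * page, since faulting it in while the page is locked could deadlock if
 * the fault needs that same page:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	// ... grab and lock the pagecache page, then copy from buf ...
 */
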
/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

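/*
 * Example (sketch, modelled on the readahead path): allocate a fresh page
 * and try to insert it into the page cache and the LRU; on failure (out of
 * memory, or another thread instantiated the page first) drop our
 * reference to free the unused page:
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 *		page_cache_release(page);
 *		return 0;
 *	}
 *	// ... page is locked and in the cache; start IO on it ...
 */
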
#endif /* _LINUX_PAGEMAP_H */