hugetlb: fix pool resizing corner case
[linux-2.6-block.git] / mm / hugetlb.c
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
int hugetlb_dynamic_pool;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

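/*
 * Take a free huge page off the free list of the node that best matches
 * the mempolicy and cpuset constraints of @vma at @address.
 */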
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

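/*
 * Drop a huge page from the pool counters, clear its page flags and
 * compound destructor, and hand it back to the buddy allocator.
 */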
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

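/*
 * Compound page destructor: surplus pages go straight back to the buddy
 * allocator, everything else returns to the hugetlb free lists.
 */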
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);

	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

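/*
 * Allocate a fresh huge page from the buddy allocator and account it as
 * a persistent (non-surplus) member of the pool.
 */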
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	int nid;

	/*
	 * Copy static prev_nid to local nid, work on that, then copy it
	 * back to prev_nid afterwards: otherwise there's a window in which
	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
	 * But we don't need to use a spin_lock here: it really doesn't
	 * matter if occasionally a racer chooses the same nid as we do.
	 */
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

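/*
 * Allocate a surplus huge page directly from the buddy allocator.  Only
 * permitted when the dynamic pool is enabled; the page is accounted in
 * the surplus counters so free_huge_page() will release it again.
 */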
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;

	/* Check if the dynamic pool is enabled */
	if (!hugetlb_dynamic_pool)
		return NULL;

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		surplus_huge_pages++;
		surplus_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else
			update_and_free_page(page);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}

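/*
 * Allocate a huge page for a fault.  Shared mappings draw on reserved
 * pages; private mappings may fall back to a surplus page from the
 * buddy allocator if the pool is exhausted.
 */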
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page = NULL;
	int use_reserved_page = vma->vm_flags & VM_MAYSHARE;

	spin_lock(&hugetlb_lock);
	if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	spin_unlock(&hugetlb_lock);

	/*
	 * Private mappings do not use reserved huge pages so the allocation
	 * may have failed due to an undersized hugetlb pool.  Try to grab a
	 * surplus huge page from the buddy allocator.
	 */
	if (!use_reserved_page)
		page = alloc_buddy_huge_page(vma, addr);

	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

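/*
 * Huge pages that will remain in the pool once all surplus pages have
 * drained back to the buddy allocator.
 */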
#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so we BUG if
 * we get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

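/*
 * Break copy-on-write for a private huge page: allocate a new huge page,
 * copy the old contents into it and repoint the pte.  Called with
 * mm->page_table_lock held; the lock is dropped around the copy.
 */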
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

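/*
 * Fault in a huge page that has no pte yet: look it up in (or add it to)
 * the page cache and install the new huge pte.
 */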
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

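/*
 * Main entry point for hugetlb faults.  A mutex serializes instantiation
 * of new huge pages; existing ptes are handed to hugetlb_cow() when a
 * write fault hits a read-only entry.
 */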
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

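/*
 * Reserved ranges of a hugetlbfs file are tracked on the inode's
 * mapping->private_list as an ordered list of [from, to) file_regions.
 */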
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (nrg == 0)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

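/*
 * Adjust the global reservation count by 'delta', growing the pool with
 * surplus pages if a new reservation cannot be satisfied from the pages
 * already free.
 */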
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpuset
	 * because the reservation is not checked against page availability
	 * for the current cpuset. An application can still be OOM'ed by the
	 * kernel if the cpuset it runs in lacks free huge pages. Attempting
	 * to enforce strict accounting with cpuset is almost impossible
	 * (or too ugly) because cpusets are too fluid: a task or memory
	 * node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability
	 * as a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

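/*
 * Reserve huge pages for the file range [from, to): record the range in
 * the inode's region list and charge the pages to the reserve pool.
 */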
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}