mm: thp: tail page refcounting fix
arch/powerpc/mm/gup.c
/*
 * Lockless get_user_pages_fast for powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#undef DEBUG

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

#ifdef __HAVE_ARCH_PTE_SPECIAL

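/*
 * The lockless walk below depends on _PAGE_SPECIAL: it is the only way,
 * without the VMA in hand, to recognise mappings whose pages must not have
 * a reference taken on them, so such PTEs are punted to the slow path.
 */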
static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
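	/*
	 * A reference on a compound tail page is recorded in ->_mapcount;
	 * only the head page carries a real ->_count, which is why the
	 * tail's ->_count is expected to still read zero here.
	 */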
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

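	/*
	 * Each PTE must be present and user-accessible, and writable when a
	 * write pin is requested.  _PAGE_SPECIAL is part of the mask but not
	 * of the expected value, so special mappings fail the check below
	 * and are left to the slow path.
	 */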
	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		pte_t pte = *ptep;
		struct page *page;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
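		/*
		 * Take a speculative reference, then re-check that the PTE
		 * did not change underneath us; if it did, drop the
		 * reference and let the caller fall back to the slow path.
		 */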
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		if (PageTail(page))
			get_huge_page_tail(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
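		/*
		 * On powerpc a huge page may be hooked in at this level as a
		 * hugepd entry; hand those to gup_hugepd(), otherwise
		 * descend to the PTE level.
		 */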
		if (is_hugepd(pmdp)) {
			if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (is_hugepd(pudp)) {
			if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

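/*
 * Pin @nr_pages user pages starting at @start without taking mmap_sem:
 * walk the page tables with interrupts disabled, taking a speculative
 * reference on each page.  Anything the lockless walk cannot handle is
 * retried through the regular get_user_pages() slow path.
 */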
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		goto slow_irqon;

	pr_devel(" aligned: %lx .. %lx\n", start, end);

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on powerpc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
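	/*
	 * (powerpc frees page-table pages only after an RCU grace period,
	 * and a CPU running with interrupts disabled cannot pass through a
	 * quiescent state, so everything the walk dereferences stays
	 * allocated until local_irq_enable() below.)
	 */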
	local_irq_disable();

	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		pr_devel(" %016lx: normal pgd %p\n", addr,
			 (void *)pgd_val(pgd));
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (is_hugepd(pgdp)) {
			if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
					addr, next, write, pages, &nr))
				goto slow;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);

	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

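	/*
	 * Fall-back: any failure in the lockless walk lands on one of the
	 * labels below.  Whatever was pinned so far is kept, and the rest
	 * of the range is retried with the regular, sleeping
	 * get_user_pages().
	 */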
	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		pr_devel(" slow path ! nr = %d\n", nr);

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/*
		 * Have to be a bit careful with return values: if the fast
		 * path already pinned some pages, report that count even if
		 * the slow path failed, otherwise add in its result.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}

#endif /* __HAVE_ARCH_PTE_SPECIAL */