/*
 * Lockless get_user_pages_fast for x86
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>

static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X86_PAE
        return ACCESS_ONCE(*ptep);
#else
        /*
         * With get_user_pages_fast, we walk down the pagetables without taking
         * any locks. For this we would like to load the pointers atomically,
         * but that is not possible (without expensive cmpxchg8b) on PAE. What
         * we do have is the guarantee that a pte will only either go from not
         * present to present, or present to not present or both -- it will not
         * switch to a completely different present page without a TLB flush in
         * between; something that we are blocking by holding interrupts off.
         *
         * Setting ptes from not present to present goes:
         * ptep->pte_high = h;
         * smp_wmb();
         * ptep->pte_low = l;
         *
         * And present to not present goes:
         * ptep->pte_low = 0;
         * smp_wmb();
         * ptep->pte_high = 0;
         *
         * We must ensure here that the load of pte_low sees l iff pte_high
         * sees h. We load pte_high *after* loading pte_low, which ensures we
         * don't see an older value of pte_high. *Then* we recheck pte_low,
         * which ensures that we haven't picked up a changed pte high. We might
         * have got rubbish values from pte_low and pte_high, but we are
         * guaranteed that pte_low will not have the present bit set *unless*
         * it is 'l'. And get_user_pages_fast only operates on present ptes, so
         * we're safe.
         *
         * gup_get_pte should not be used or copied outside gup.c without being
         * very careful -- it does not atomically load the pte or anything that
         * is likely to be useful for you.
         */
        pte_t pte;

retry:
        pte.pte_low = ptep->pte_low;
        smp_rmb();
        pte.pte_high = ptep->pte_high;
        smp_rmb();
        if (unlikely(pte.pte_low != ptep->pte_low))
                goto retry;

        return pte;
#endif
}
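
/*
 * Annotation (not part of the original file): the retry loop above is a
 * seqlock-style guard against torn reads of a two-halves value. Assuming
 * a writer that orders its two stores with smp_wmb() as described in the
 * comment, the same reader can be sketched in portable C11 user-space
 * code (all names below are hypothetical):
 *
 *	#include <stdint.h>
 *	#include <stdatomic.h>
 *
 *	struct split64 { _Atomic uint32_t lo, hi; };
 *
 *	static uint64_t read_split64(struct split64 *p)
 *	{
 *		uint32_t lo, hi;
 *	retry:
 *		lo = atomic_load_explicit(&p->lo, memory_order_acquire);
 *		hi = atomic_load_explicit(&p->hi, memory_order_acquire);
 *		if (lo != atomic_load_explicit(&p->lo, memory_order_relaxed))
 *			goto retry;
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 *
 * The acquire loads play the role of the smp_rmb() pairs. As in
 * gup_get_pte(), a racing reader can still assemble a mixed value; the
 * caller must tolerate that (here, by trusting only present ptes).
 */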

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        pte_t *ptep;

        mask = _PAGE_PRESENT|_PAGE_USER;
        if (write)
                mask |= _PAGE_RW;

        ptep = pte_offset_map(&pmd, addr);
        do {
                pte_t pte = gup_get_pte(ptep);
                struct page *page;

                if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
                        pte_unmap(ptep);
                        return 0;
                }
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
                pages[*nr] = page;
                (*nr)++;

        } while (ptep++, addr += PAGE_SIZE, addr != end);
        pte_unmap(ptep - 1);

        return 1;
}
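
/*
 * Annotation (not part of the original file): folding _PAGE_SPECIAL into
 * the test above rejects, in a single comparison, any pte that is either
 * missing a required bit or marked special. pte_special() ptes (e.g.
 * from VM_PFNMAP/VM_MIXEDMAP mappings) may have no struct page that can
 * be refcounted, so fast GUP must bail to the slow path for them. For a
 * read, mask = _PAGE_PRESENT|_PAGE_USER, and:
 *
 *	present user pte:    flags & (mask|_PAGE_SPECIAL) == mask  -> pin it
 *	special present pte: result also has _PAGE_SPECIAL set     -> bail
 *	non-present pte:     result lacks _PAGE_PRESENT            -> bail
 */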

static inline void get_head_page_multiple(struct page *page, int nr)
{
        VM_BUG_ON(page != compound_head(page));
        VM_BUG_ON(page_count(page) == 0);
        atomic_add(nr, &page->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
        /*
         * __split_huge_page_refcount() cannot run
         * from under us.
         */
        VM_BUG_ON(atomic_read(&page->_count) < 0);
        atomic_inc(&page->_count);
}

static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        pte_t pte = *(pte_t *)&pmd;
        struct page *head, *page;
        int refs;

        mask = _PAGE_PRESENT|_PAGE_USER;
        if (write)
                mask |= _PAGE_RW;
        if ((pte_flags(pte) & mask) != mask)
                return 0;
        /* hugepages are never "special" */
        VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);
        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                if (PageTail(page))
                        get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
        get_head_page_multiple(head, refs);

        return 1;
}
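
/*
 * Worked example for the index computation above (annotation, not part
 * of the original file): with 2 MB pmds, PMD_MASK clears the low 21
 * address bits, so (addr & ~PMD_MASK) is the byte offset into the huge
 * page and shifting by PAGE_SHIFT (12) yields the subpage index 0..511:
 *
 *	addr            = 0x7f8325603000
 *	addr & ~PMD_MASK = 0x3000	(offset into the 2 MB page)
 *	0x3000 >> 12     = 3		(third subpage after the head)
 *
 * The head page takes all 'refs' in one atomic_add() at the end, while
 * each THP tail additionally gets its own reference so that a later
 * huge page split sees the pins taken on that subpage.
 */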

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
                pmd_t pmd = *pmdp;

                next = pmd_addr_end(addr, end);
                if (pmd_none(pmd))
                        return 0;
                if (unlikely(pmd_large(pmd))) {
                        if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
                                return 0;
                } else {
                        if (!gup_pte_range(pmd, addr, next, write, pages, nr))
                                return 0;
                }
        } while (pmdp++, addr = next, addr != end);

        return 1;
}

static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        pte_t pte = *(pte_t *)&pud;
        struct page *head, *page;
        int refs;

        mask = _PAGE_PRESENT|_PAGE_USER;
        if (write)
                mask |= _PAGE_RW;
        if ((pte_flags(pte) & mask) != mask)
                return 0;
        /* hugepages are never "special" */
        VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);
        page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
        get_head_page_multiple(head, refs);

        return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = *pudp;

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (unlikely(pud_large(pud))) {
                        if (!gup_huge_pud(pud, addr, next, write, pages, nr))
                                return 0;
                } else {
                        if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                                return 0;
                }
        } while (pudp++, addr = next, addr != end);

        return 1;
}
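
/*
 * Annotation (not part of the original file): each walker above copies
 * the entry it inspects into a local (pmd_t pmd = *pmdp; pud_t pud =
 * *pudp) before testing it, so the none/large checks and the descent all
 * see one consistent snapshot; disabling interrupts in the callers keeps
 * the page tables from being freed underneath us, but not from being
 * modified. pud_large() catches 1 GB hugepages, pmd_large() 2 MB ones.
 */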

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                          struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        unsigned long flags;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        (void __user *)start, len)))
                return 0;

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables and pages from being freed on x86.
         *
         * So long as we atomically load page table pointers versus teardown
         * (which we do on x86, with the above PAE exception), we can follow the
         * address down to the page and take a ref on it.
         */
        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        break;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        break;
        } while (pgdp++, addr = next, addr != end);
        local_irq_restore(flags);

        return nr;
}
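
/*
 * Annotation (not part of the original file): this variant never sleeps
 * and never takes mmap_sem, so it is usable from contexts that cannot
 * block; it only reports how many pages it managed to pin, possibly 0.
 * A hypothetical non-blocking caller would look like:
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(uaddr, 1, 0, &page) != 1)
 *		return -EFAULT;	/* could not pin without sleeping */
 */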

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;

        end = start + len;
        if (end < start)
                goto slow_irqon;

#ifdef CONFIG_X86_64
        if (end >> __VIRTUAL_MASK_SHIFT)
                goto slow_irqon;
#endif

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables and pages from being freed on x86.
         *
         * So long as we atomically load page table pointers versus teardown
         * (which we do on x86, with the above PAE exception), we can follow the
         * address down to the page and take a ref on it.
         */
        local_irq_disable();
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        goto slow;
        } while (pgdp++, addr = next, addr != end);
        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;

        {
                int ret;

slow:
                local_irq_enable();
slow_irqon:
                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                down_read(&mm->mmap_sem);
                ret = get_user_pages(current, mm, start,
                        (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
                up_read(&mm->mmap_sem);

                /* Have to be a bit careful with return values */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}
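
/*
 * Usage sketch (annotation, not part of the original file): a typical
 * direct-IO style caller pins the user buffer, does its work, then drops
 * every page reference. All names below except get_user_pages_fast(),
 * put_page(), DIV_ROUND_UP() and the PAGE_* constants are hypothetical:
 *
 *	long pin_user_buffer(unsigned long uaddr, size_t bytes, int write,
 *			     struct page **pages)
 *	{
 *		int nr = DIV_ROUND_UP(bytes + (uaddr & ~PAGE_MASK), PAGE_SIZE);
 *		int got = get_user_pages_fast(uaddr, nr, write, pages);
 *
 *		if (got < 0)
 *			return got;	/* nothing pinned: -errno */
 *		if (got < nr) {		/* partial pin: undo and bail */
 *			while (got--)
 *				put_page(pages[got]);
 *			return -EFAULT;
 *		}
 *		return nr;
 *	}
 *
 * Note the asymmetric return convention documented in the kerneldoc
 * above: a partial success returns the short count rather than an
 * error, so callers must handle both cases.
 */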