/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, MM_FILEPAGES);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (end <= start || start < vma->vm_start || end > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = vma->vm_file;

			flags &= MAP_NONBLOCK;
			get_file(file);
			addr = mmap_region(file, start, size,
					flags, vma->vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
		spin_lock(&mapping->i_mmap_lock);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop PG_Mlocked flag for over-mapped range
		 */
		unsigned int saved_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = saved_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = populate_range(mm, vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);
	if (!err && !(flags & MAP_NONBLOCK)) {
		if (vma->vm_flags & VM_LOCKED) {
			/*
			 * might be mapping previously unmapped range of file
			 */
			mlock_vma_pages_range(vma, start, start + size);
		} else {
			if (unlikely(has_write_lock)) {
				downgrade_write(&mm->mmap_sem);
				has_write_lock = 0;
			}
			make_pages_present(start, start + size);
		}
	}

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock. (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
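
To illustrate the interface documented in the kernel-doc comment above, here is a minimal userspace sketch (not part of mm/fremap.c): it maps the first two pages of a file MAP_SHARED and then swaps their order inside the window with remap_file_pages(). The path "./data", the variable names, and the abbreviated error handling are assumptions for illustration only; the file is assumed to be at least two pages long, and prot must be 0 as the NOTE above requires.

/* Hypothetical usage sketch of remap_file_pages(2); assumes "./data" exists
 * and is at least two pages long. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	int fd = open("./data", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	/* Linear MAP_SHARED window over the first two pages of the file. */
	char *win = mmap(NULL, 2 * psize, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (win == MAP_FAILED) { perror("mmap"); return 1; }

	/*
	 * Rearrange the window in place: virtual page 0 now shows file
	 * page 1, virtual page 1 shows file page 0.  pgoff is in units
	 * of pages, and prot must be 0 (see the NOTE above).
	 */
	if (remap_file_pages(win, psize, 0, 1, 0) ||
	    remap_file_pages(win + psize, psize, 0, 0, 0)) {
		perror("remap_file_pages");
		return 1;
	}

	printf("first byte of window now: %c\n", win[0]);
	munmap(win, 2 * psize);
	close(fd);
	return 0;
}

Note that on filesystems whose mappings account dirty pages, sys_remap_file_pages() above falls back to the linear-vma emulation (the mmap_region() path), so new vmas may still be created in that case; the pure pagetable path applies to the remaining cases.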