mm: Check if PTE is already allocated during page fault
[linux-2.6-block.git] / mm / mprotect.c
CommitLineData
1da177e4
LT
1/*
2 * mm/mprotect.c
3 *
4 * (C) Copyright 1994 Linus Torvalds
5 * (C) Copyright 2002 Christoph Hellwig
6 *
046c6884 7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
1da177e4
LT
8 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
9 */
10
11#include <linux/mm.h>
12#include <linux/hugetlb.h>
1da177e4
LT
13#include <linux/shm.h>
14#include <linux/mman.h>
15#include <linux/fs.h>
16#include <linux/highmem.h>
17#include <linux/security.h>
18#include <linux/mempolicy.h>
19#include <linux/personality.h>
20#include <linux/syscalls.h>
0697212a
CL
21#include <linux/swap.h>
22#include <linux/swapops.h>
cddb8a5c 23#include <linux/mmu_notifier.h>
64cdd548 24#include <linux/migrate.h>
cdd6c482 25#include <linux/perf_event.h>
1da177e4
LT
26#include <asm/uaccess.h>
27#include <asm/pgtable.h>
28#include <asm/cacheflush.h>
29#include <asm/tlbflush.h>
30
1c12c4cf
VP
31#ifndef pgprot_modify
32static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
33{
34 return newprot;
35}
36#endif
37
1da177e4 38static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
c1e6098b
PZ
39 unsigned long addr, unsigned long end, pgprot_t newprot,
40 int dirty_accountable)
1da177e4 41{
0697212a 42 pte_t *pte, oldpte;
705e87c0 43 spinlock_t *ptl;
1da177e4 44
705e87c0 45 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
6606c3e0 46 arch_enter_lazy_mmu_mode();
1da177e4 47 do {
0697212a
CL
48 oldpte = *pte;
49 if (pte_present(oldpte)) {
1da177e4
LT
50 pte_t ptent;
51
1ea0704e 52 ptent = ptep_modify_prot_start(mm, addr, pte);
c1e6098b 53 ptent = pte_modify(ptent, newprot);
1ea0704e 54
c1e6098b
PZ
55 /*
56 * Avoid taking write faults for pages we know to be
57 * dirty.
58 */
59 if (dirty_accountable && pte_dirty(ptent))
60 ptent = pte_mkwrite(ptent);
1ea0704e
JF
61
62 ptep_modify_prot_commit(mm, addr, pte, ptent);
ce1744f4 63 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
0697212a
CL
64 swp_entry_t entry = pte_to_swp_entry(oldpte);
65
66 if (is_write_migration_entry(entry)) {
67 /*
68 * A protection check is difficult so
69 * just be safe and disable write
70 */
71 make_migration_entry_read(&entry);
72 set_pte_at(mm, addr, pte,
73 swp_entry_to_pte(entry));
74 }
1da177e4
LT
75 }
76 } while (pte++, addr += PAGE_SIZE, addr != end);
6606c3e0 77 arch_leave_lazy_mmu_mode();
705e87c0 78 pte_unmap_unlock(pte - 1, ptl);
1da177e4
LT
79}
80
b36f5b07 81static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
c1e6098b
PZ
82 unsigned long addr, unsigned long end, pgprot_t newprot,
83 int dirty_accountable)
1da177e4
LT
84{
85 pmd_t *pmd;
86 unsigned long next;
87
88 pmd = pmd_offset(pud, addr);
89 do {
90 next = pmd_addr_end(addr, end);
cd7548ab
JW
91 if (pmd_trans_huge(*pmd)) {
92 if (next - addr != HPAGE_PMD_SIZE)
93 split_huge_page_pmd(vma->vm_mm, pmd);
94 else if (change_huge_pmd(vma, pmd, addr, newprot))
95 continue;
96 /* fall through */
97 }
1da177e4
LT
98 if (pmd_none_or_clear_bad(pmd))
99 continue;
b36f5b07
JW
100 change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
101 dirty_accountable);
1da177e4
LT
102 } while (pmd++, addr = next, addr != end);
103}
104
b36f5b07 105static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
c1e6098b
PZ
106 unsigned long addr, unsigned long end, pgprot_t newprot,
107 int dirty_accountable)
1da177e4
LT
108{
109 pud_t *pud;
110 unsigned long next;
111
112 pud = pud_offset(pgd, addr);
113 do {
114 next = pud_addr_end(addr, end);
115 if (pud_none_or_clear_bad(pud))
116 continue;
b36f5b07
JW
117 change_pmd_range(vma, pud, addr, next, newprot,
118 dirty_accountable);
1da177e4
LT
119 } while (pud++, addr = next, addr != end);
120}
121
122static void change_protection(struct vm_area_struct *vma,
c1e6098b
PZ
123 unsigned long addr, unsigned long end, pgprot_t newprot,
124 int dirty_accountable)
1da177e4
LT
125{
126 struct mm_struct *mm = vma->vm_mm;
127 pgd_t *pgd;
128 unsigned long next;
129 unsigned long start = addr;
130
131 BUG_ON(addr >= end);
132 pgd = pgd_offset(mm, addr);
133 flush_cache_range(vma, addr, end);
1da177e4
LT
134 do {
135 next = pgd_addr_end(addr, end);
136 if (pgd_none_or_clear_bad(pgd))
137 continue;
b36f5b07
JW
138 change_pud_range(vma, pgd, addr, next, newprot,
139 dirty_accountable);
1da177e4
LT
140 } while (pgd++, addr = next, addr != end);
141 flush_tlb_range(vma, start, end);
1da177e4
LT
142}
143
b6a2fea3 144int
1da177e4
LT
145mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
146 unsigned long start, unsigned long end, unsigned long newflags)
147{
148 struct mm_struct *mm = vma->vm_mm;
149 unsigned long oldflags = vma->vm_flags;
150 long nrpages = (end - start) >> PAGE_SHIFT;
151 unsigned long charged = 0;
1da177e4
LT
152 pgoff_t pgoff;
153 int error;
c1e6098b 154 int dirty_accountable = 0;
1da177e4
LT
155
156 if (newflags == oldflags) {
157 *pprev = vma;
158 return 0;
159 }
160
161 /*
162 * If we make a private mapping writable we increase our commit;
163 * but (without finer accounting) cannot reduce our commit if we
5a6fe125
MG
164 * make it unwritable again. hugetlb mapping were accounted for
165 * even if read-only so there is no need to account for them here
1da177e4
LT
166 */
167 if (newflags & VM_WRITE) {
5a6fe125 168 if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
cdfd4325 169 VM_SHARED|VM_NORESERVE))) {
1da177e4 170 charged = nrpages;
191c5424 171 if (security_vm_enough_memory_mm(mm, charged))
1da177e4
LT
172 return -ENOMEM;
173 newflags |= VM_ACCOUNT;
174 }
175 }
176
1da177e4
LT
177 /*
178 * First try to merge with previous and/or next vma.
179 */
180 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
181 *pprev = vma_merge(mm, *pprev, start, end, newflags,
182 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
183 if (*pprev) {
184 vma = *pprev;
185 goto success;
186 }
187
188 *pprev = vma;
189
190 if (start != vma->vm_start) {
191 error = split_vma(mm, vma, start, 1);
192 if (error)
193 goto fail;
194 }
195
196 if (end != vma->vm_end) {
197 error = split_vma(mm, vma, end, 0);
198 if (error)
199 goto fail;
200 }
201
202success:
203 /*
204 * vm_flags and vm_page_prot are protected by the mmap_sem
205 * held in write mode.
206 */
207 vma->vm_flags = newflags;
1c12c4cf
VP
208 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
209 vm_get_page_prot(newflags));
210
c1e6098b 211 if (vma_wants_writenotify(vma)) {
1ddd439e 212 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
c1e6098b
PZ
213 dirty_accountable = 1;
214 }
d08b3851 215
cddb8a5c 216 mmu_notifier_invalidate_range_start(mm, start, end);
8f860591 217 if (is_vm_hugetlb_page(vma))
d08b3851 218 hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
8f860591 219 else
c1e6098b 220 change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
cddb8a5c 221 mmu_notifier_invalidate_range_end(mm, start, end);
ab50b8ed
HD
222 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
223 vm_stat_account(mm, newflags, vma->vm_file, nrpages);
63bfd738 224 perf_event_mmap(vma);
1da177e4
LT
225 return 0;
226
227fail:
228 vm_unacct_memory(charged);
229 return error;
230}
231
6a6160a7
HC
232SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
233 unsigned long, prot)
1da177e4
LT
234{
235 unsigned long vm_flags, nstart, end, tmp, reqprot;
236 struct vm_area_struct *vma, *prev;
237 int error = -EINVAL;
238 const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
239 prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
240 if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
241 return -EINVAL;
242
243 if (start & ~PAGE_MASK)
244 return -EINVAL;
245 if (!len)
246 return 0;
247 len = PAGE_ALIGN(len);
248 end = start + len;
249 if (end <= start)
250 return -ENOMEM;
b845f313 251 if (!arch_validate_prot(prot))
1da177e4
LT
252 return -EINVAL;
253
254 reqprot = prot;
255 /*
256 * Does the application expect PROT_READ to imply PROT_EXEC:
257 */
b344e05c 258 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1da177e4
LT
259 prot |= PROT_EXEC;
260
261 vm_flags = calc_vm_prot_bits(prot);
262
263 down_write(&current->mm->mmap_sem);
264
097d5910 265 vma = find_vma(current->mm, start);
1da177e4
LT
266 error = -ENOMEM;
267 if (!vma)
268 goto out;
097d5910 269 prev = vma->vm_prev;
1da177e4
LT
270 if (unlikely(grows & PROT_GROWSDOWN)) {
271 if (vma->vm_start >= end)
272 goto out;
273 start = vma->vm_start;
274 error = -EINVAL;
275 if (!(vma->vm_flags & VM_GROWSDOWN))
276 goto out;
277 }
278 else {
279 if (vma->vm_start > start)
280 goto out;
281 if (unlikely(grows & PROT_GROWSUP)) {
282 end = vma->vm_end;
283 error = -EINVAL;
284 if (!(vma->vm_flags & VM_GROWSUP))
285 goto out;
286 }
287 }
288 if (start > vma->vm_start)
289 prev = vma;
290
291 for (nstart = start ; ; ) {
292 unsigned long newflags;
293
294 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
295
1da177e4
LT
296 newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
297
7e2cff42
PBG
298 /* newflags >> 4 shift VM_MAY% in place of VM_% */
299 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
1da177e4
LT
300 error = -EACCES;
301 goto out;
302 }
303
304 error = security_file_mprotect(vma, reqprot, prot);
305 if (error)
306 goto out;
307
308 tmp = vma->vm_end;
309 if (tmp > end)
310 tmp = end;
311 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
312 if (error)
313 goto out;
314 nstart = tmp;
315
316 if (nstart < prev->vm_end)
317 nstart = prev->vm_end;
318 if (nstart >= end)
319 goto out;
320
321 vma = prev->vm_next;
322 if (!vma || vma->vm_start != nstart) {
323 error = -ENOMEM;
324 goto out;
325 }
326 }
327out:
328 up_write(&current->mm->mmap_sem);
329 return error;
330}