/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

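/*
 * For reference: mm->context.spt used throughout this file is a
 * struct subpage_prot_table (defined in the book3s-64 hash MMU headers);
 * with 64k pages it looks roughly like this (a sketch, annotations added
 * here for orientation):
 *
 *	struct subpage_prot_table {
 *		unsigned long maxaddr;     only addresses < this are protected
 *		unsigned int **protptrs[TASK_SIZE_USER64 >> 43];
 *		unsigned int *low_prot[4];   covers the first 4GB
 *	};
 *
 * low_prot[] points directly at pages of u32 protection maps for addresses
 * below 4GB (one map page covers 1GB of address space); protptrs[] is a
 * two-level tree above that, with one page of pointers per 8TB region.
 */
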
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}

static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the VMAs in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range is in an unmapped region, just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
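
/*
 * Illustrative userspace usage (a sketch; only the behaviour documented
 * above is implied by this file, and __NR_subpage_prot is the powerpc
 * syscall number macro): one u32 of map data is consumed per 64k page in
 * [addr, addr + len), and passing map == NULL clears any existing subpage
 * protection for the range.
 *
 *	u32 prot = 0xaaaaaaaa;	 every 2-bit field is 2: no access to
 *				 any 4k subpage of this one 64k page
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, &prot);
 *	...
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, NULL);   clear again
 */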
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	/* Subpage protection only exists for the hash MMU. */
	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		demote_segment_4k(mm, addr);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);

		if (limit > spt->maxaddr)
			spt->maxaddr = limit;
	}
	err = 0;
 out:
	up_write(&mm->mmap_sem);
	return err;
}