/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
#include "linux/errno.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "os.h"
#include "skas.h"

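/* Start of the syscall stub section, placed by the UML linker script;
 * declared as an int only so that its address can be taken.
 */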
extern int __syscall_stub_start;

static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

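	/* Walk to the page table entry covering 'proc', allocating any
	 * intermediate levels that don't exist yet.
	 */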
	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap. exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated. If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't. To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page. So, we hang onto it here and free it in
	 * destroy_context_skas.
	 */

	mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

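	/* Map the kernel page holding the stub at 'proc' in the process
	 * address space - present and executable, but write-protected,
	 * since the process must run the stub but never modify it.
	 */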
	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkexec(*pte);
	*pte = pte_wrprotect(*pte);
	return(0);

 out_pte:
	pmd_free(pmd);
 out_pmd:
	pud_free(pud);
 out:
	return(-ENOMEM);
}

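/* Set up the host address space for a new mm.  If the host supports
 * /proc/mm (proc_mm) and PTRACE_FAULTINFO (ptrace_faultinfo), the new
 * address space is created through a /proc/mm descriptor.  Otherwise,
 * this is a skas0 host, and a stub code page plus a stub data/stack
 * page must be mapped into the process so that its system calls can
 * be run from a stub in its own address space.
 */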
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_struct *cur_mm = current->mm;
	struct mm_id *cur_mm_id = &cur_mm->context.skas.id;
	struct mm_id *mm_id = &mm->context.skas.id;
	unsigned long stack = 0;
	int from, ret = -ENOMEM;

	if(!proc_mm || !ptrace_faultinfo){
		stack = get_zeroed_page(GFP_KERNEL);
		if(stack == 0)
			goto out;

		/* This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm->nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if(ret)
			goto out_free;

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if(ret)
			goto out_free;

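		/* The stub ptes sit above TASK_SIZE, on the last page
		 * table page, which destroy_context_skas frees by hand.
		 * Drop it from the accounting that exit_mmap checks.
		 */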
		mm->nr_ptes--;
	}
	mm_id->stack = stack;

	if(proc_mm){
		if((cur_mm != NULL) && (cur_mm != &init_mm))
			from = cur_mm_id->u.mm_fd;
		else from = -1;

		ret = new_mm(from, stack);
		if(ret < 0){
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			goto out_free;
		}
		mm_id->u.mm_fd = ret;
	}
	else {
		if((cur_mm != NULL) && (cur_mm != &init_mm))
			ret = copy_context_skas0(stack,
						 cur_mm_id->u.pid);
		else ret = start_userspace(stack);

		if(ret < 0)
			goto out_free;

		mm_id->u.pid = ret;
	}

	return 0;

 out_free:
	if(stack != 0)
		free_page(stack);
 out:
	return ret;
}

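/* Undo init_new_context_skas: release the host address space (the
 * /proc/mm descriptor or the ptraced child), and in skas0 mode free
 * the stub stack page and the last page table page that init_stub_pte
 * deliberately leaked (see the comment there).
 */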
void destroy_context_skas(struct mm_struct *mm)
{
	struct mmu_context_skas *mmu = &mm->context.skas;

	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

	if(!proc_mm || !ptrace_faultinfo){
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel((pte_t *) mmu->last_page_table);
		dec_page_state(nr_page_table_pages);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
	}
}