powerpc: initial pkey plumbing
[linux-2.6-block.git] / arch/powerpc/mm/mmu_context_book3s64.c
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
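
/*
 * Usage sketch (illustrative, not part of the original file): the
 * pre-get/retry dance above is the classic pattern that predates
 * ida_simple_get(); ids that land past max_id are backed out again.
 * A hypothetical caller sees only the final id or an errno:
 *
 *	int id = alloc_context_id(MIN_USER_CONTEXT, max);
 *	if (id < 0)
 *		return id;
 */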

void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}
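
/*
 * Usage sketch (illustrative): reservation is for ids that must have a
 * fixed, well-known value rather than whatever the IDA hands out next,
 * e.g. a hypothetical boot-time caller doing:
 *
 *	hash__reserve_context_id(SOME_FIXED_ID);
 *
 * (SOME_FIXED_ID is a stand-in name, not a real constant.) Afterwards
 * alloc_context_id() can never give that id to a user context.
 */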

int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
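
/*
 * Worked example (illustrative, based on the hash VSID layout): the
 * context id and the per-context effective-address bits together form
 * the virtual address. With roughly 49 bits of VA per context, a
 * 68-bit-VA machine has about 19 bits of context id to hand out, while
 * a 65-bit-VA machine has only about 16, hence the smaller
 * MAX_USER_CONTEXT_65BIT_VA ceiling passed to alloc_context_id() above.
 */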

static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * In the case of exec, use the default limit,
	 * otherwise inherit it from the mm we are duplicating.
	 */
	if (!mm->context.slb_addr_limit)
		mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork; we don't do that when using
	 * slices, as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}
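
/*
 * Illustrative note: the slb_addr_limit check above is what separates
 * exec from fork. On exec the new mm starts zeroed, so the limit becomes
 * DEFAULT_MAP_WINDOW_USER64; on fork, dup_mm() has already copied the
 * parent's context, so a parent that raised its address-space limit
 * passes that limit on to the child.
 */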

static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}
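
/*
 * Illustrative breakdown of the prtb0 store above: a single big-endian
 * doubleword publishes the whole process-table entry, composed as
 *
 *	rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE
 *
 * i.e. the radix tree size, the physical address of the top-level page
 * table, and the size of its root index. That is why one 64-bit store
 * plus the ptesync;isync pair that follows it is enough to make the
 * entry safely visible to the hardware.
 */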

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);

	return 0;
}
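
/*
 * Call-path note (illustrative): init_new_context() is reached from the
 * generic mm_init() path on fork and exec, so by the time the new mm is
 * visible to anyone, context.id is valid and the pte_frag, IOMMU and
 * active_cpus state has been reset.
 */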

void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif
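
/*
 * Worked example (illustrative) for the refcount arithmetic above: a PTE
 * page is carved into PTE_FRAG_NR fragments. If pte_frag points at
 * fragment 3 of its page, then count = 3 fragments were handed out, and
 * the PTE_FRAG_NR - 3 references held for the never-allocated fragments
 * are dropped here; the backing page is freed once the handed-out
 * fragments have dropped their own references too.
 */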

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled())
		WARN_ON(process_tb[mm->context.id].prtb0 != 0);
	else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However, we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : : "memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif
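
/*
 * Illustrative note on the special case above: POWER9 DD1 needs the PID
 * move bracketed by isync and followed by PPC_INVALIDATE_ERAT to flush
 * stale translations, an early-silicon workaround; later revisions only
 * need the single isync after mtspr(SPRN_PID) to order the context
 * switch against subsequent translations.
 */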