/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

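/*
 * Shadow TLB1 entries are allocated from the top of the host TLB1
 * downward (the low entries up to tlbcam_index belong to the host), so
 * a shadow esel is converted by counting back from the last entry.
 */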
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

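/*
 * One (AS, TID, PR) combination as tracked by the shadow-id tables:
 * val is the shadow id allocated on some core, and pentry points back
 * at the pcpu_sids slot that currently claims it.
 */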
struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS, guestTID, guestPR) --> shadow ID on the physical CPU
 * guestAS [0..1]
 * guestTID [0..255]
 * guestPR [0..1]
 * ID [1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};
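/*
 * Illustrative example: the shadow id for a guest running with AS=0,
 * TID=5 in problem state (PR=1) lives in idt->id[0][5][1], and is only
 * usable while the matching pcpu_sids slot on the current core still
 * points back at that entry (see local_sid_lookup()).
 */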

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of shadow ID is [1..255] */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

static unsigned int tlb1_entry_num;

/*
 * Allocate a free shadow id and set up a valid sid mapping in the given entry.
 * A mapping is only valid when vcpu_id_table and pcpu_id_table match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on the local core */
static inline void local_sid_destroy_all(void)
{
	preempt_disable();
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
	preempt_enable();
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
}

/* Invalidate all id mappings on the vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on the vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu, AS, ID, PR) to a physical core shadow id.
 * This function first looks up an existing valid mapping and,
 * if none is found, creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
					unsigned int as, unsigned int gid,
					unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}
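/*
 * Typical calling pattern (sketch): the lookup is wrapped in a
 * preemption-disabled region and the sid is consumed before preemption
 * is re-enabled, e.g.:
 *
 *	preempt_disable();
 *	stid = kvmppc_e500_get_sid(vcpu_e500, as, gid, pr, 0);
 *	stlbe->mas1 |= MAS1_TID(stid);
 *	write_host_tlbe(vcpu_e500, tlbsel, esel, stlbe);
 *	preempt_enable();
 */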

/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and use PID1 to keep the shadow of the guest zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
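/* Both lookups pass avoid_recursion=1, since kvmppc_e500_get_sid()
 * would otherwise call back into this function when it has to rebuild
 * a mapping. */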
void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

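/* Pick TLB0 victims round-robin across the ways of a set. */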
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return tlb1_entry_num - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
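/*
 * Example of the shift above, assuming the usual e500 MAS3 layout
 * where user and supervisor permission bits alternate (..., UW, SW,
 * UR, SR): a guest-supervisor mapping with SR|SW becomes UR|UW in the
 * shadow entry, since the guest kernel runs in user mode on the host.
 */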

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	/* Force M (coherence), presumably because a vcpu may migrate
	 * between cores. */
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write the shadow TLB entry into the host TLB.  Interrupts are
 * disabled so that a TLB miss taken in an interrupt handler cannot
 * clobber the MAS registers between the mtsprs and the tlbwe.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel, struct tlbe *stlbe)
{
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(0) |
				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}
	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

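/*
 * Map the magic page at the host TLB1 slot tlbcam_index -- the one
 * entry that tlb1_max_shadow_size() reserves, so the shadow TLB1
 * round-robin can never evict it.
 */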
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas3 = (pfn << PAGE_SHIFT) |
		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas7 = pfn >> (32 - PAGE_SHIFT);

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Shadow PID may have expired on the local core */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
					 int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts, pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case (for TLB0) we do a local invalidation
		 * of the specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU, or
		 * if we're invalidating a TLB1 entry, we invalidate the
		 * entire shadow PID.
		 */
		if (tlbsel == 1 ||
		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a TLB0 entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search host TLB0 to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

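/*
 * Guest TLB0 is modeled as set-associative: the low EPN bits select a
 * set and only KVM_E500_TLB0_WAY_NUM entries in that set are scanned,
 * while TLB1 is fully associative and searched in full.
 */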
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_size[tlbsel];
	int set_base;
	int i;

	if (tlbsel == 0) {
		int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
		set_base = (eaddr >> PAGE_SHIFT) & mask;
		set_base *= KVM_E500_TLB0_WAY_NUM;
		size = KVM_E500_TLB0_WAY_NUM;
	} else {
		set_base = 0;
	}

	for (i = 0; i < size; i++) {
		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
					  struct tlbe *gtlbe,
					  pfn_t pfn)
{
	priv->pfn = pfn;
	priv->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		priv->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
{
	if (priv->flags & E500_TLB_VALID) {
		if (priv->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(priv->pfn);
		else
			kvm_release_pfn_clean(priv->pfn);

		priv->flags = 0;
	}
}

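/*
 * Emulate what hardware loads into the MAS registers on a guest TLB
 * miss, honouring the guest's MAS4 defaults (TLBSELD, TIDSELD and
 * TSIZED, plus the attribute defaults).
 */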
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
					   struct tlbe *gtlbe, int tsize,
					   struct tlbe_priv *priv,
					   u64 gvaddr, struct tlbe *stlbe)
{
	pfn_t pfn = priv->pfn;
	unsigned int stid;

	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe),
				   get_cur_pr(&vcpu_e500->vcpu), 0);

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize)
		| MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}

static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
	struct tlbe *stlbe)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	struct tlbe_priv *priv;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
	}

	/* Drop old priv and setup new one. */
	priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
	kvmppc_e500_priv_release(priv);
	kvmppc_e500_priv_setup(priv, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				int esel, struct tlbe *stlbe)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->gtlb_arch[0][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, esel, stlbe);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
	unsigned int victim;

	/* Victim slots in the shadow TLB1 are recycled round-robin. */
	victim = vcpu_e500->gtlb_nv[1]++;

	if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->gtlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);

	return victim;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

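/*
 * Emulated tlbwe: update the guest TLB entry from the guest's MAS
 * registers and, when the new entry is host-safe, eagerly install a
 * shadow mapping so the guest does not fault on it right away.
 */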
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (get_tlb_v(gtlbe))
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Premap the shadow TLB if the new entry is safe to insert. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		struct tlbe stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

		preempt_disable();
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
		preempt_enable();
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

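/*
 * Install a shadow entry for a guest mapping that missed in the host
 * TLB: TLB0 entries are refilled in place, while TLB1 entries are
 * shadowed piecewise through the shadow TLB1 round-robin.
 */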
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct tlbe *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	preempt_disable();
	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					priv, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
	preempt_enable();
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
			   gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->gtlb_arch[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->gtlb_arch[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_arch[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[0] == NULL)
		goto err_out;

	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_arch[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[1] == NULL)
		goto err_out_guest0;

	vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_priv[0] == NULL)
		goto err_out_guest1;
	vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);

	if (vcpu_e500->gtlb_priv[1] == NULL)
		goto err_out_priv0;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto err_out_priv1;

	/* Init TLB configuration register: the low 12 bits of TLBnCFG
	 * hold the entry count, so advertise the guest TLB sizes there. */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];

	return 0;

err_out_priv1:
	kfree(vcpu_e500->gtlb_priv[1]);
err_out_priv0:
	kfree(vcpu_e500->gtlb_priv[0]);
err_out_guest1:
	kfree(vcpu_e500->gtlb_arch[1]);
err_out_guest0:
	kfree(vcpu_e500->gtlb_arch[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel, i;

	/* release all privs */
	for (stlbsel = 0; stlbsel < 2; stlbsel++)
		for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
			struct tlbe_priv *priv =
				&vcpu_e500->gtlb_priv[stlbsel][i];
			kvmppc_e500_priv_release(priv);
		}

	kvmppc_e500_id_table_free(vcpu_e500);
	kfree(vcpu_e500->gtlb_arch[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
}