KVM: PPC: Book3S HV: Fix migration and HPT resizing of HPT guests on radix hosts
arch/powerpc/include/asm/kvm_book3s_64.h
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>

/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif
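
/*
 * Illustrative usage sketch (added; not part of the original header):
 * the accessor pair above must bracket any shadow-vcpu access, since
 * svcpu_get() disables preemption and svcpu_put() re-enables it. The
 * helper name below is made up for this example.
 */
static inline void example_svcpu_usage(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	/* ... read or update svcpu state while preemption is off ... */
	svcpu_put(svcpu);
}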

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif
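
/*
 * Worked example (added for illustration): an HPT "order" is log2 of
 * the table size in bytes, so PPC_MIN_HPT_ORDER 18 is 2^18 = 256kiB,
 * KVM_DEFAULT_HPT_ORDER 24 is 2^24 = 16MiB, and PPC_MAX_HPT_ORDER 46
 * is 2^46 = 64TiB.
 */
static inline unsigned long example_hpt_size_bytes(int order)
{
	return 1UL << order;	/* e.g. example_hpt_size_bytes(24) == 16MiB */
}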

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the PTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}

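/*
 * Hypothetical usage sketch (added for illustration): take the per-HPTE
 * lock, update the entry, then release it. try_lock_hpte() fails if any
 * of the given bits (typically HPTE_V_HVLOCK itself) are already set in
 * dword 0.
 */
static inline void example_hpte_update(__be64 *hptep, unsigned long new_v)
{
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	/* ... modify hptep[1] (the PP/WIMG word) as needed here ... */
	unlock_hpte(hptep, new_v);	/* barrier, then store v without the lock bit */
}
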
/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;		/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : -1;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 3:
		return !lphi ? 34 : -1;		/* 16GB */
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return -1;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	return 1ul << kvmppc_hpte_actual_page_shift(v, r);
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}

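/*
 * Round-trip sanity sketch (added for illustration): for a mixed-size
 * encoding, kvmppc_hpte_page_shifts() packs the actual shift in bits
 * 8-15 and the base shift in bits 0-7. A 16MB page mapped with a 64kB
 * base page size decodes as (24 << 8) + 16.
 */
static inline void example_page_shift_roundtrip(void)
{
	/* LP encoding for base 64kB, actual 16MB is 8 (case 16/24 above) */
	int lp = kvmppc_pgsize_lp_encoding(16, 24);
	unsigned long l = (unsigned long)lp << 12;	/* LP lives in r bits 12-19 */

	BUG_ON(kvmppc_hpte_base_page_shift(HPTE_V_LARGE, l) != 16);
	BUG_ON(kvmppc_hpte_actual_page_shift(HPTE_V_LARGE, l) != 24);
}
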
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of the va.
	 * v has its top two bits covering segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 of the va (so we
	 * need to collect 11 extra bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of the va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * AVA in v has its lower 23 bits cleared. We need to derive
	 * those from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * HPTE_V_AVPN_SHIFT (7) bits. Now to find the vsid we right
	 * shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift == 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * remaining bits of AVA/LP fields
		 * Also contain the rr bits of LP
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on
		 * the actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field is bits 58..77 - base_page_shift of the va.
		 * We have space for bits 58..64; missing bits should be
		 * zero filled. +1 is to take care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

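/*
 * Hypothetical caller sketch (added for illustration): the real-mode
 * invalidation paths read the locked HPTE and feed the derived RB image
 * to tlbie. The helper name is made up for this example.
 */
static inline unsigned long example_rb_for_invalidate(__be64 *hptep,
						      unsigned long pte_index)
{
	unsigned long v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	unsigned long r = be64_to_cpu(hptep[1]);

	return compute_tlbie_rb(v, r, pte_index);
}
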
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

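/*
 * Sanity sketch (added for illustration): downgrading an HPTE with
 * hpte_make_readonly() must always yield PP bits that
 * hpte_is_writable() rejects, whatever the starting permissions.
 */
static inline void example_readonly_downgrade(unsigned long ptel)
{
	BUG_ON(hpte_is_writable(hpte_make_readonly(ptel)));
}
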
static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host maps the page cache-inhibited, make sure the
	 * guest HPTE is cache-inhibited too.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

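/*
 * Illustrative expectations (added; assumes a POWER7 or later host
 * where CPU_FTR_ARCH_206 is set, so the SAO combination is accepted):
 */
static inline void example_wimg_checks(void)
{
	BUG_ON(!hpte_cache_flags_ok(HPTE_R_M, false));	/* normal memory */
	BUG_ON(!hpte_cache_flags_ok(HPTE_R_W | HPTE_R_I | HPTE_R_M, false)); /* SAO */
	BUG_ON(!hpte_cache_flags_ok(HPTE_R_I, true));	/* cache-inhibited */
	BUG_ON(hpte_cache_flags_ok(HPTE_R_M, true));	/* host CI, guest cached */
}
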
/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the PTE is not present, return an empty PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

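/*
 * Hypothetical caller sketch (added for illustration): a fault handler
 * reads the Linux PTE with reference/dirty tracking and extracts the
 * page frame number. The helper name is made up.
 */
static inline unsigned long example_pte_to_pfn(pte_t *ptep, int writing)
{
	pte_t pte = kvmppc_read_update_linux_pte(ptep, writing);

	if (!pte_present(pte))
		return 0;	/* caller treats 0 as "no valid mapping" */
	return pte_pfn(pte);
}
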
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

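/*
 * Illustrative decode (added): the returned bit pair follows the AMR
 * convention used by the translation code, where bit 0 denies reads
 * and bit 1 denies writes for the HPTE's storage key.
 */
static inline bool example_skey_allows_write(unsigned long hpte_r,
					     unsigned long amr)
{
	return !(hpte_get_skey_perm(hpte_r, amr) & 2);
}
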
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

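/*
 * Hypothetical usage sketch (added): the rmap word for a guest page is
 * locked around any walk or update of the reverse-mapping chain.
 */
static inline void example_rmap_update(unsigned long *rmap)
{
	lock_rmap(rmap);
	/* ... walk or modify the rmap chain encoded in *rmap ... */
	unlock_rmap(rmap);
}
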
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

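/*
 * Illustrative composition (added), mirroring how a VRMA SLB value can
 * be built from this encoding; shown for an assumed 16MB page size.
 */
static inline unsigned long example_vrma_slb_v(void)
{
	return SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T) |
	       slb_pgsize_encoding(0x1000000);
}
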
static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}

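/*
 * Worked example (added): with the default order of 24 (a 16MB HPT)
 * there are 2^20 HPTEs in 2^17 groups, so kvmppc_hpt_mask() is 0x1ffff.
 * A raw hash value is reduced to a PTEG index like this:
 */
static inline unsigned long example_hash_to_pteg(struct kvm_hpt_info *hpt,
						 unsigned long hash)
{
	return hash & kvmppc_hpt_mask(hpt);
}
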
/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

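/*
 * Hypothetical usage sketch (added): mark guest pages dirty in a
 * memslot's little-endian dirty bitmap; the atomic variant is for
 * callers that may race with other bitmap writers.
 */
static inline void example_mark_dirty(struct kvm_memory_slot *memslot,
				      unsigned long gfn, unsigned long npages)
{
	set_dirty_bits_atomic(memslot->dirty_bitmap,
			      gfn - memslot->base_gfn, npages);
}
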
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */