/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>
#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

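/*
 * Usage sketch (illustrative, not from this file): callers are expected
 * to try the CMA pool first and fall back to the regular page allocator
 * when cma_alloc() fails, along these lines (the fallback shown is an
 * assumption for illustration):
 *
 *	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
 *	if (!page)
 *		hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *				       order - PAGE_SHIFT);
 *
 * kvm_free_hpt_cma() must only be passed pages that came from
 * kvm_alloc_hpt_cma(); cma_release() rejects pages outside its region.
 */
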
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}
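
/*
 * Worked example: on a host with 256 GiB of memory and the default
 * kvm_cma_resv_ratio of 5, selected_size comes to roughly 12.8 GiB.
 * The region is declared with a bitmap granularity of
 * 2^KVM_CMA_CHUNK_ORDER = 256 KiB and aligned to HPT_ALIGN_PAGES,
 * the 256 KiB HPT alignment required on CPU_FTR_ARCH_206 and later.
 */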

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
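
/*
 * kvm_hv_mode_active() is the read side of the counter above: platform
 * code can poll it to refuse operations such as onlining secondary
 * threads while any HV guest exists. The get/put_online_cpus() pairs in
 * the activate/deactivate paths order the counter update against CPU
 * hotplug, so a concurrent online operation sees a stable value.
 */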

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
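
/*
 * Hypercall numbers are multiples of 4, so cmd / 4 indexes
 * hcall_real_table, which has one entry per possible hcall and is
 * non-zero where a real-mode handler exists in book3s_hv_rmhandlers.S.
 * For example, H_ENTER (0x08) maps to index 2.
 */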

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}
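
/*
 * For H_RANDOM the generated value is handed back to the guest in GPR4
 * (vcpu->arch.regs.gpr[4] above), with the hcall status (H_SUCCESS or
 * H_HARDWARE when no hardware entropy was available) returned in GPR3
 * as for any hypercall.
 */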

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}
270 | ||
271 | /* | |
272 | * The following functions are called from the assembly code | |
273 | * in book3s_hv_rmhandlers.S. | |
274 | */ | |
275 | static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active) | |
276 | { | |
277 | int cpu = vc->pcpu; | |
278 | ||
279 | /* Order setting of exit map vs. msgsnd/IPI */ | |
280 | smp_mb(); | |
281 | for (; active; active >>= 1, ++cpu) | |
282 | if (active & 1) | |
283 | kvmhv_rm_send_ipi(cpu); | |
284 | } | |
285 | ||
286 | void kvmhv_commence_exit(int trap) | |
287 | { | |
288 | struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; | |
289 | int ptid = local_paca->kvm_hstate.ptid; | |
b4deba5c | 290 | struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode; |
c0101509 PM |
291 | int me, ee, i, t; |
292 | int cpu0; | |
eddb60fb PM |
293 | |
294 | /* Set our bit in the threads-exiting-guest map in the 0xff00 | |
295 | bits of vcore->entry_exit_map */ | |
296 | me = 0x100 << ptid; | |
297 | do { | |
298 | ee = vc->entry_exit_map; | |
299 | } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee); | |
300 | ||
301 | /* Are we the first here? */ | |
302 | if ((ee >> 8) != 0) | |
303 | return; | |
304 | ||
305 | /* | |
306 | * Trigger the other threads in this vcore to exit the guest. | |
307 | * If this is a hypervisor decrementer interrupt then they | |
308 | * will be already on their way out of the guest. | |
309 | */ | |
310 | if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER) | |
311 | kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid)); | |
b4deba5c PM |
312 | |
313 | /* | |
314 | * If we are doing dynamic micro-threading, interrupt the other | |
315 | * subcores to pull them out of their guests too. | |
316 | */ | |
317 | if (!sip) | |
318 | return; | |
319 | ||
320 | for (i = 0; i < MAX_SUBCORES; ++i) { | |
898b25b2 | 321 | vc = sip->vc[i]; |
b4deba5c PM |
322 | if (!vc) |
323 | break; | |
324 | do { | |
325 | ee = vc->entry_exit_map; | |
326 | /* Already asked to exit? */ | |
327 | if ((ee >> 8) != 0) | |
328 | break; | |
329 | } while (cmpxchg(&vc->entry_exit_map, ee, | |
330 | ee | VCORE_EXIT_REQ) != ee); | |
331 | if ((ee >> 8) == 0) | |
332 | kvmhv_interrupt_vcore(vc, ee); | |
333 | } | |
c0101509 PM |
334 | |
335 | /* | |
336 | * On POWER9 when running a HPT guest on a radix host (sip != NULL), | |
337 | * we have to interrupt inactive CPU threads to get them to | |
338 | * restore the host LPCR value. | |
339 | */ | |
340 | if (sip->lpcr_req) { | |
341 | if (cmpxchg(&sip->do_restore, 0, 1) == 0) { | |
342 | vc = local_paca->kvm_hstate.kvm_vcore; | |
343 | cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid; | |
344 | for (t = 1; t < threads_per_core; ++t) { | |
345 | if (sip->napped[t]) | |
346 | kvmhv_rm_send_ipi(cpu0 + t); | |
347 | } | |
348 | } | |
349 | } | |
eddb60fb | 350 | } |
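
/*
 * entry_exit_map packs two 8-bit maps: the low byte tracks threads that
 * have entered the guest, the 0xff00 byte tracks threads that have been
 * asked to exit. For example, with threads 0 and 2 in the guest the map
 * is 0x005; when thread 0 commences the exit it becomes 0x105, and the
 * "(ee >> 8) != 0" test lets every later arrival return early.
 */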

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif
424 | ||
37f55d30 SW |
425 | /* |
426 | * Determine what sort of external interrupt is pending (if any). | |
427 | * Returns: | |
428 | * 0 if no interrupt is pending | |
429 | * 1 if an interrupt is pending that needs to be handled by the host | |
f7af5209 | 430 | * 2 Passthrough that needs completion in the host |
37f55d30 | 431 | * -1 if there was a guest wakeup IPI (which has now been cleared) |
e3c13e56 | 432 | * -2 if there is PCI passthrough external interrupt that was handled |
37f55d30 | 433 | */ |
f725758b | 434 | static long kvmppc_read_one_intr(bool *again); |
37f55d30 SW |
435 | |
436 | long kvmppc_read_intr(void) | |
f725758b PM |
437 | { |
438 | long ret = 0; | |
439 | long rc; | |
440 | bool again; | |
441 | ||
243e2511 BH |
442 | if (xive_enabled()) |
443 | return 1; | |
444 | ||
f725758b PM |
445 | do { |
446 | again = false; | |
447 | rc = kvmppc_read_one_intr(&again); | |
448 | if (rc && (ret == 0 || rc > ret)) | |
449 | ret = rc; | |
450 | } while (again); | |
451 | return ret; | |
452 | } | |
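
/*
 * The merge rule above ("rc && (ret == 0 || rc > ret)") keeps the
 * highest pending result across iterations: e.g. if one pass returns -1
 * (guest wakeup IPI, already cleared) and a later pass returns 1 (host
 * interrupt), the function reports 1 so the host still gets to handle
 * its interrupt.
 */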
453 | ||
454 | static long kvmppc_read_one_intr(bool *again) | |
37f55d30 | 455 | { |
d381d7ca | 456 | void __iomem *xics_phys; |
37f55d30 SW |
457 | u32 h_xirr; |
458 | __be32 xirr; | |
459 | u32 xisr; | |
460 | u8 host_ipi; | |
f725758b | 461 | int64_t rc; |
37f55d30 | 462 | |
5af50993 BH |
463 | if (xive_enabled()) |
464 | return 1; | |
465 | ||
37f55d30 SW |
466 | /* see if a host IPI is pending */ |
467 | host_ipi = local_paca->kvm_hstate.host_ipi; | |
468 | if (host_ipi) | |
469 | return 1; | |
470 | ||
471 | /* Now read the interrupt from the ICP */ | |
f3c18e93 PM |
472 | if (kvmhv_on_pseries()) { |
473 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | |
474 | ||
475 | rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF); | |
476 | xirr = cpu_to_be32(retbuf[0]); | |
477 | } else { | |
478 | xics_phys = local_paca->kvm_hstate.xics_phys; | |
479 | rc = 0; | |
480 | if (!xics_phys) | |
481 | rc = opal_int_get_xirr(&xirr, false); | |
482 | else | |
483 | xirr = __raw_rm_readl(xics_phys + XICS_XIRR); | |
484 | } | |
53af3ba2 PM |
485 | if (rc < 0) |
486 | return 1; | |
37f55d30 SW |
487 | |
488 | /* | |
489 | * Save XIRR for later. Since we get control in reverse endian | |
490 | * on LE systems, save it byte reversed and fetch it back in | |
491 | * host endian. Note that xirr is the value read from the | |
492 | * XIRR register, while h_xirr is the host endian version. | |
493 | */ | |
37f55d30 SW |
494 | h_xirr = be32_to_cpu(xirr); |
495 | local_paca->kvm_hstate.saved_xirr = h_xirr; | |
496 | xisr = h_xirr & 0xffffff; | |
497 | /* | |
498 | * Ensure that the store/load complete to guarantee all side | |
499 | * effects of loading from XIRR has completed | |
500 | */ | |
501 | smp_mb(); | |
502 | ||
503 | /* if nothing pending in the ICP */ | |
504 | if (!xisr) | |
505 | return 0; | |
506 | ||
507 | /* We found something in the ICP... | |
508 | * | |
509 | * If it is an IPI, clear the MFRR and EOI it. | |
510 | */ | |
511 | if (xisr == XICS_IPI) { | |
53af3ba2 | 512 | rc = 0; |
f3c18e93 PM |
513 | if (kvmhv_on_pseries()) { |
514 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | |
515 | ||
516 | plpar_hcall_raw(H_IPI, retbuf, | |
517 | hard_smp_processor_id(), 0xff); | |
518 | plpar_hcall_raw(H_EOI, retbuf, h_xirr); | |
519 | } else if (xics_phys) { | |
d381d7ca BH |
520 | __raw_rm_writeb(0xff, xics_phys + XICS_MFRR); |
521 | __raw_rm_writel(xirr, xics_phys + XICS_XIRR); | |
f725758b | 522 | } else { |
ab9bad0e BH |
523 | opal_int_set_mfrr(hard_smp_processor_id(), 0xff); |
524 | rc = opal_int_eoi(h_xirr); | |
f725758b | 525 | } |
53af3ba2 PM |
526 | /* If rc > 0, there is another interrupt pending */ |
527 | *again = rc > 0; | |
f725758b | 528 | |
37f55d30 SW |
529 | /* |
530 | * Need to ensure side effects of above stores | |
531 | * complete before proceeding. | |
532 | */ | |
533 | smp_mb(); | |
534 | ||
535 | /* | |
536 | * We need to re-check host IPI now in case it got set in the | |
537 | * meantime. If it's clear, we bounce the interrupt to the | |
538 | * guest | |
539 | */ | |
540 | host_ipi = local_paca->kvm_hstate.host_ipi; | |
541 | if (unlikely(host_ipi != 0)) { | |
542 | /* We raced with the host, | |
543 | * we need to resend that IPI, bummer | |
544 | */ | |
f3c18e93 PM |
545 | if (kvmhv_on_pseries()) { |
546 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | |
547 | ||
548 | plpar_hcall_raw(H_IPI, retbuf, | |
549 | hard_smp_processor_id(), | |
550 | IPI_PRIORITY); | |
551 | } else if (xics_phys) | |
d381d7ca BH |
552 | __raw_rm_writeb(IPI_PRIORITY, |
553 | xics_phys + XICS_MFRR); | |
f725758b | 554 | else |
ab9bad0e BH |
555 | opal_int_set_mfrr(hard_smp_processor_id(), |
556 | IPI_PRIORITY); | |
37f55d30 SW |
557 | /* Let side effects complete */ |
558 | smp_mb(); | |
559 | return 1; | |
560 | } | |
561 | ||
562 | /* OK, it's an IPI for us */ | |
563 | local_paca->kvm_hstate.saved_xirr = 0; | |
564 | return -1; | |
565 | } | |
566 | ||
f725758b | 567 | return kvmppc_check_passthru(xisr, xirr, again); |
37f55d30 | 568 | } |
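
/*
 * The kvmppc_rm_h_* wrappers below all follow the same three-way
 * dispatch: on a XIVE host, real-mode calls go to the built-in
 * xive_rm_* handlers, while virtual-mode calls go through the
 * __xive_vm_* pointers that the XIVE module fills in at load time
 * (H_NOT_AVAILABLE if it has not loaded yet); on a XICS host they fall
 * through to the xics_rm_* implementations.
 */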

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}
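
/*
 * wait_for_sync() is a simple barrier across the four threads of the
 * core: each thread ORs the phase bit into its own byte of lpcr_sync,
 * and ALL() replicates the bit into every byte, e.g.
 * ALL(PHASE_SET_LPCR) == 0x02020202, so the spin loop releases only
 * once all four threads have reported that phase.
 */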
705 | ||
706 | void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip) | |
707 | { | |
708 | unsigned long rb, set; | |
709 | ||
710 | /* wait for every other thread to get to real mode */ | |
711 | wait_for_sync(sip, PHASE_REALMODE); | |
712 | ||
713 | /* Set LPCR and LPIDR */ | |
714 | mtspr(SPRN_LPCR, sip->lpcr_req); | |
715 | mtspr(SPRN_LPID, sip->lpidr_req); | |
716 | isync(); | |
717 | ||
718 | /* Invalidate the TLB on thread 0 */ | |
719 | if (local_paca->kvm_hstate.tid == 0) { | |
720 | sip->do_set = 0; | |
721 | asm volatile("ptesync" : : : "memory"); | |
722 | for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) { | |
723 | rb = TLBIEL_INVAL_SET_LPID + | |
724 | (set << TLBIEL_INVAL_SET_SHIFT); | |
725 | asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : : | |
726 | "r" (rb), "r" (0)); | |
727 | } | |
728 | asm volatile("ptesync" : : : "memory"); | |
729 | } | |
730 | ||
731 | /* indicate that we have done so and wait for others */ | |
732 | wait_for_sync(sip, PHASE_SET_LPCR); | |
733 | /* order read of sip->lpcr_sync.allphases vs. sip->do_set */ | |
734 | smp_rmb(); | |
735 | } | |
736 | ||
737 | /* | |
738 | * Called when a thread that has been in the guest needs | |
739 | * to reload the host LPCR value - but only on POWER9 when | |
740 | * running a HPT guest on a radix host. | |
741 | */ | |
742 | void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip) | |
743 | { | |
744 | /* we're out of the guest... */ | |
745 | wait_for_sync(sip, PHASE_OUT_OF_GUEST); | |
746 | ||
747 | mtspr(SPRN_LPID, 0); | |
748 | mtspr(SPRN_LPCR, sip->host_lpcr); | |
749 | isync(); | |
750 | ||
751 | if (local_paca->kvm_hstate.tid == 0) { | |
752 | sip->do_restore = 0; | |
753 | smp_wmb(); /* order store of do_restore vs. phase */ | |
754 | } | |
755 | ||
756 | wait_for_sync(sip, PHASE_RESET_LPCR); | |
757 | smp_mb(); | |
758 | local_paca->kvm_hstate.kvm_split_mode = NULL; | |
759 | } | |

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long vec = 0;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			vec = BOOK3S_INTERRUPT_EXTERNAL;
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				vec = BOOK3S_INTERRUPT_DECREMENTER;
		}
	}
	if (vec) {
		unsigned long msr, old_msr = vcpu->arch.shregs.msr;

		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
		kvmppc_set_srr1(vcpu, old_msr);
		kvmppc_set_pc(vcpu, vec);
		msr = vcpu->arch.intr_msr;
		if (MSR_TM_ACTIVE(old_msr))
			msr |= MSR_TS_S;
		vcpu->arch.shregs.msr = msr;
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}
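
/*
 * Setting LPCR[MER] (mediated external request) above makes the
 * hardware deliver an external interrupt to the guest as soon as the
 * guest sets MSR_EE; the explicit injection through SRR0/SRR1 is only
 * done here when MSR_EE is already set at guest entry.
 */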

static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
	}
	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;

	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pcpu = cpu_first_thread_sibling(pcpu);

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
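
/*
 * Each tlbiel in flush_guest_tlb() targets one congruence class: rb
 * starts with IS = 2 and PPC_BIT(51) advances the set number, so the
 * loops walk all kvm->arch.tlb_sets sets (on POWER9 this should be 128
 * for a radix guest and 256 for a hash guest), and the trailing ptesync
 * plus ERAT invalidation make the flush visible before guest entry.
 */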