arch/arm64/kvm/pvtime.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>

#include <kvm/arm_hypercalls.h>

void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	u64 last_steal = vcpu->arch.steal.last_steal;
	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	u64 steal = 0;
	int idx;

	if (base == GPA_INVALID)
		return;

	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_get_guest(kvm, base + offset, steal)) {
		steal = le64_to_cpu(steal);
		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.steal.last_steal - last_steal;
		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
	}
	srcu_read_unlock(&kvm->srcu, idx);
}
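
/*
 * Note on the guest-visible record (illustrative, paraphrased from
 * <asm/pvclock-abi.h> / ARM DEN 0057A rather than quoted verbatim):
 *
 *	struct pvclock_vcpu_stolen_time {
 *		__le32	revision;
 *		__le32	attributes;
 *		__le64	stolen_time;	// nanoseconds, little-endian
 *		u8	padding[48];	// pad the record to 64 bytes
 *	} __packed;
 *
 * kvm_update_stolen_time() rewrites only the stolen_time field: it adds
 * the run_delay accumulated since the last update and stores the result
 * back with kvm_put_guest().
 */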

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
	u32 feature = smccc_get_arg1(vcpu);
	long val = SMCCC_RET_NOT_SUPPORTED;

	switch (feature) {
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		if (vcpu->arch.steal.base != GPA_INVALID)
			val = SMCCC_RET_SUCCESS;
		break;
	}

	return val;
}
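
/*
 * For illustration only (not part of this file): a guest would typically
 * probe for the service before using it, along the lines of the checks in
 * arch/arm64/kernel/paravirt.c:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
 *			     ARM_SMCCC_HV_PV_TIME_ST, &res);
 *	if (res.a0 != SMCCC_RET_SUCCESS)
 *		return false;	// stolen time not advertised
 *
 * Only after a successful probe does the guest issue ARM_SMCCC_HV_PV_TIME_ST
 * to obtain the IPA of its stolen-time record.
 */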

gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
	struct pvclock_vcpu_stolen_time init_values = {};
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	int idx;

	if (base == GPA_INVALID)
		return base;

	/*
	 * Start counting stolen time from the moment the guest enables
	 * the feature.
	 */
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
	srcu_read_unlock(&kvm->srcu, idx);

	return base;
}
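
/*
 * kvm_init_stolen_time() is reached from the ARM_SMCCC_HV_PV_TIME_ST
 * hypercall handler (see arch/arm64/kvm/hypercalls.c): it zeroes the
 * guest's record, snapshots run_delay as the new baseline, and returns
 * the IPA the guest should map and read from.
 */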

bool kvm_arm_pvtime_supported(void)
{
	return !!sched_info_on();
}

int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;
	u64 ipa;
	int ret = 0;
	int idx;

	if (!kvm_arm_pvtime_supported() ||
	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	if (get_user(ipa, user))
		return -EFAULT;
	if (!IS_ALIGNED(ipa, 64))
		return -EINVAL;
	if (vcpu->arch.steal.base != GPA_INVALID)
		return -EEXIST;

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret)
		vcpu->arch.steal.base = ipa;

	return ret;
}
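
/*
 * Illustrative userspace sketch (not part of this file): the attribute is
 * set through the vCPU device-attr ioctls, roughly as follows. The ioctl
 * names and the KVM_ARM_VCPU_PVTIME_CTRL group come from the KVM API; the
 * variable names and STOLEN_TIME_IPA are made up for the example.
 *
 *	__u64 ipa = STOLEN_TIME_IPA;		// 64-byte aligned guest PA
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PVTIME_CTRL,
 *		.attr	= KVM_ARM_VCPU_PVTIME_IPA,
 *		.addr	= (__u64)&ipa,
 *	};
 *
 *	if (!ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
 *		ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * A second KVM_SET_DEVICE_ATTR on the same vCPU fails with EEXIST, as
 * enforced above.
 */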

int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	u64 ipa;

	if (!kvm_arm_pvtime_supported() ||
	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	ipa = vcpu->arch.steal.base;

	if (put_user(ipa, user))
		return -EFAULT;
	return 0;
}

int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PVTIME_IPA:
		if (kvm_arm_pvtime_supported())
			return 0;
	}
	return -ENXIO;
}