Commit | Line | Data |
---|---|---|
4ccefbe5 SS |
1 | /* |
2 | * Xen stolen ticks accounting. | |
3 | */ | |
4 | #include <linux/kernel.h> | |
5 | #include <linux/kernel_stat.h> | |
6 | #include <linux/math64.h> | |
7 | #include <linux/gfp.h> | |
8 | ||
9 | #include <asm/xen/hypervisor.h> | |
10 | #include <asm/xen/hypercall.h> | |
11 | ||
12 | #include <xen/events.h> | |
13 | #include <xen/features.h> | |
14 | #include <xen/interface/xen.h> | |
15 | #include <xen/interface/vcpu.h> | |
16 | #include <xen/xen-ops.h> | |
17 | ||
/*
 * Per-cpu runstate info, updated in place by the Xen hypervisor once
 * the area is registered via xen_setup_runstate_info() below.
 */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
20 | ||
/*
 * Return a consistent snapshot of a 64-bit time/counter value.
 *
 * On 32-bit kernels a 64-bit load is not atomic and may be torn by a
 * concurrent update from the hypervisor, so the value is read word by
 * word with a retry loop; on 64-bit kernels a plain load suffices.
 */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * NOTE(review): p32[1]-as-high assumes little-endian
		 * word order, as the XXX below already hints.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();	/* order the high read before the low read */
			l = p32[0];
			barrier();	/* order the low read before the re-check */
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}
49 | ||
/*
 * Runstate accounting
 */
/*
 * Copy a consistent snapshot of the current CPU's runstate info into
 * *res.  Must be called with preemption disabled (enforced by the
 * BUG_ON below), since the data is read from this CPU's per-cpu area.
 */
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = this_cpu_ptr(&xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 * state_entry_time serves as a sequence counter: re-read it
	 * after the copy and retry if it changed mid-copy.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}
74 | ||
75 | /* return true when a vcpu could run but has no real cpu to run on */ | |
76 | bool xen_vcpu_stolen(int vcpu) | |
77 | { | |
78 | return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; | |
79 | } | |
80 | ||
81 | void xen_setup_runstate_info(int cpu) | |
82 | { | |
83 | struct vcpu_register_runstate_memory_area area; | |
84 | ||
85 | area.addr.v = &per_cpu(xen_runstate, cpu); | |
86 | ||
87 | if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, | |
88 | cpu, &area)) | |
89 | BUG(); | |
90 | } | |
91 |