// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */
11 | ||
af669ac6 | 12 | #include <kvm/iodev.h> |
5f94c174 LV |
13 | |
14 | #include <linux/kvm_host.h> | |
5a0e3ad6 | 15 | #include <linux/slab.h> |
5f94c174 LV |
16 | #include <linux/kvm.h> |
17 | ||
18 | #include "coalesced_mmio.h" | |
19 | ||
/* Recover the coalesced-MMIO device from its embedded generic I/O device. */
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
bda9020e MT |
25 | static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, |
26 | gpa_t addr, int len) | |
5f94c174 | 27 | { |
2b3c246a SL |
28 | /* is it in a batchable area ? |
29 | * (addr,len) is fully included in | |
30 | * (zone->addr, zone->size) | |
31 | */ | |
1a214246 DC |
32 | if (len < 0) |
33 | return 0; | |
34 | if (addr + len < addr) | |
35 | return 0; | |
36 | if (addr < dev->zone.addr) | |
37 | return 0; | |
38 | if (addr + len > dev->zone.addr + dev->zone.size) | |
39 | return 0; | |
40 | return 1; | |
5f94c174 LV |
41 | } |
42 | ||
b60fe990 | 43 | static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last) |
c298125f SL |
44 | { |
45 | struct kvm_coalesced_mmio_ring *ring; | |
46 | unsigned avail; | |
47 | ||
48 | /* Are we able to batch it ? */ | |
49 | ||
50 | /* last is the first free entry | |
51 | * check if we don't meet the first used entry | |
52 | * there is always one unused entry in the buffer | |
53 | */ | |
54 | ring = dev->kvm->coalesced_mmio_ring; | |
b60fe990 | 55 | avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX; |
c298125f SL |
56 | if (avail == 0) { |
57 | /* full */ | |
58 | return 0; | |
59 | } | |
60 | ||
61 | return 1; | |
62 | } | |
63 | ||
/*
 * Attempt to coalesce a guest write into the ring shared with userspace.
 *
 * Returns 0 when the write was queued, or -EOPNOTSUPP when the access is
 * outside this device's zone or the ring is full, in which case the caller
 * falls back to the normal (non-coalesced) exit path.
 */
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	/*
	 * Snapshot ring->last once; the explicit bound check guards against
	 * an out-of-range index (the ring page is presumably shared with
	 * userspace, so the value cannot be trusted — NOTE(review): confirm
	 * against how the ring page is mapped).
	 */
	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data in first free entry of the ring */

	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;
	/* Publish the entry contents before advancing ring->last. */
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
95 | ||
/*
 * Bus-device destructor: unlink the zone from kvm->coalesced_zones and free
 * it.  Per the init-time comment, list manipulation relies on the caller
 * holding kvm->slots_lock.
 */
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}
104 | ||
/* I/O device callbacks; only writes are coalesced, so there is no .read. */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
109 | ||
5f94c174 LV |
110 | int kvm_coalesced_mmio_init(struct kvm *kvm) |
111 | { | |
980da6ce | 112 | struct page *page; |
5f94c174 | 113 | |
93bb59ca | 114 | page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
980da6ce | 115 | if (!page) |
b139b5a2 | 116 | return -ENOMEM; |
980da6ce | 117 | |
2b3c246a | 118 | kvm->coalesced_mmio_ring = page_address(page); |
980da6ce | 119 | |
2b3c246a SL |
120 | /* |
121 | * We're using this spinlock to sync access to the coalesced ring. | |
656012c7 | 122 | * The list doesn't need its own lock since device registration and |
2b3c246a SL |
123 | * unregistration should only happen when kvm->slots_lock is held. |
124 | */ | |
125 | spin_lock_init(&kvm->ring_lock); | |
126 | INIT_LIST_HEAD(&kvm->coalesced_zones); | |
090b7aff | 127 | |
b139b5a2 | 128 | return 0; |
5f94c174 LV |
129 | } |
130 | ||
980da6ce AK |
131 | void kvm_coalesced_mmio_free(struct kvm *kvm) |
132 | { | |
133 | if (kvm->coalesced_mmio_ring) | |
134 | free_page((unsigned long)kvm->coalesced_mmio_ring); | |
135 | } | |
136 | ||
/*
 * Register a new coalesced MMIO/PIO zone: allocate a device wrapping the
 * zone, attach it to the appropriate bus, and track it in
 * kvm->coalesced_zones.
 *
 * Returns 0 on success, -EINVAL for a malformed zone->pio flag, -ENOMEM on
 * allocation failure, or the error from kvm_io_bus_register_dev().
 */
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	/* zone->pio comes from userspace; accept only 0 (MMIO) or 1 (PIO). */
	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
		      GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	/* slots_lock protects both bus registration and the zone list. */
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}
172 | ||
/*
 * Unregister every coalesced zone of the requested type (MMIO or PIO) that
 * falls inside the given range, detaching each matching device from its bus
 * and destroying it.
 *
 * Returns 0 (or -EINVAL for a malformed zone->pio flag); bus-unregister
 * failures are deliberately not propagated — see the comment below.
 */
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;
	int r;

	/* zone->pio comes from userspace; accept only 0 (MMIO) or 1 (PIO). */
	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	/* _safe variant: matching entries are deleted while iterating. */
	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			r = kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);

			/*
			 * On failure, unregister destroys all devices on the
			 * bus _except_ the target device, i.e. coalesced_zones
			 * has been modified. No need to restart the walk as
			 * there aren't any zones left.
			 */
			if (r)
				break;
			kvm_iodevice_destructor(&dev->dev);
		}
	}

	mutex_unlock(&kvm->slots_lock);

	/*
	 * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
	 * perspective, the coalesced MMIO is most definitely unregistered.
	 */
	return 0;
}