virtio_pci: split out legacy device support
Source file: drivers/virtio/virtio_pci_legacy.c (from linux-2.6-block.git, commit 38eb4a29)
1/*
2 * Virtio PCI driver
3 *
4 * This module allows virtio devices to be used over a virtual PCI device.
5 * This can be used with QEMU based VMMs like KVM or Xen.
6 *
7 * Copyright IBM Corp. 2007
8 *
9 * Authors:
10 * Anthony Liguori <aliguori@us.ibm.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or later.
13 * See the COPYING file in the top-level directory.
14 *
15 */
16
17#include "virtio_pci.h"
18
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	/* Match every device from vendor 0x1af4; probe() narrows the range. */
	{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
	{ 0 }	/* sentinel: terminates the table */
};

/* Export the table so module autoloading can bind us to these devices. */
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
26
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* When someone needs more than 32 feature bits, we'll need to
	 * steal a bit to indicate that the rest are somewhere else.
	 * The legacy HOST_FEATURES register is only 32 bits wide, so the
	 * upper half of the returned u64 is always zero here. */
	return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}
36
/* virtio config->finalize_features() implementation.
 * Writes the feature bits the driver accepted back to the device.
 * Always returns 0 on the legacy transport. */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/* We only support 32 feature bits. */
	iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

	return 0;
}
53
54/* virtio config->get() implementation */
55static void vp_get(struct virtio_device *vdev, unsigned offset,
56 void *buf, unsigned len)
57{
58 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
59 void __iomem *ioaddr = vp_dev->ioaddr +
60 VIRTIO_PCI_CONFIG(vp_dev) + offset;
61 u8 *ptr = buf;
62 int i;
63
64 for (i = 0; i < len; i++)
65 ptr[i] = ioread8(ioaddr + i);
66}
67
68/* the config->set() implementation. it's symmetric to the config->get()
69 * implementation */
70static void vp_set(struct virtio_device *vdev, unsigned offset,
71 const void *buf, unsigned len)
72{
73 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
74 void __iomem *ioaddr = vp_dev->ioaddr +
75 VIRTIO_PCI_CONFIG(vp_dev) + offset;
76 const u8 *ptr = buf;
77 int i;
78
79 for (i = 0; i < len; i++)
80 iowrite8(ptr[i], ioaddr + i);
81}
82
/* config->{get,set}_status() implementations */

/* Read the device status byte (VIRTIO_CONFIG_S_* bits). */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
89
/* Write the device status byte; writing 0 means reset, which must
 * go through vp_reset() instead. */
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
97
/* Reset the device: write 0 to the status register, then flush so the
 * reset has taken effect before callbacks are synchronized. */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSi-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
109
/* Assign @vector as the MSI-X vector for configuration-change events.
 * Returns the value the device reports back: equal to @vector on
 * success, VIRTIO_MSI_NO_VECTOR if the device could not allocate it. */
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}
118
119static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
120 struct virtio_pci_vq_info *info,
121 unsigned index,
122 void (*callback)(struct virtqueue *vq),
123 const char *name,
124 u16 msix_vec)
125{
126 struct virtqueue *vq;
127 unsigned long size;
128 u16 num;
129 int err;
130
131 /* Select the queue we're interested in */
132 iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
133
134 /* Check if queue is either not available or already active. */
135 num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
136 if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
137 return ERR_PTR(-ENOENT);
138
139 info->num = num;
140 info->msix_vector = msix_vec;
141
142 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
143 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
144 if (info->queue == NULL)
145 return ERR_PTR(-ENOMEM);
146
147 /* activate the queue */
148 iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
149 vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
150
151 /* create the vring */
152 vq = vring_new_virtqueue(index, info->num,
153 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
154 true, info->queue, vp_notify, callback, name);
155 if (!vq) {
156 err = -ENOMEM;
157 goto out_activate_queue;
158 }
159
160 vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
161
162 if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
163 iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
164 msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
165 if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
166 err = -EBUSY;
167 goto out_assign;
168 }
169 }
170
171 return vq;
172
173out_assign:
174 vring_del_virtqueue(vq);
175out_activate_queue:
176 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
177 free_pages_exact(info->queue, size);
178 return ERR_PTR(err);
179}
180
/* Tear down a virtqueue created by setup_vq(): detach its MSI-X vector
 * (if any), delete the vring, deactivate the queue on the device, and
 * free the ring pages. */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	unsigned long size;

	/* Select the queue the register writes below apply to. */
	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* Free the ring pages sized exactly as in setup_vq(). */
	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
}
204
/* virtio config ops for the legacy transport: device-specific accessors
 * are defined above; vq management and interrupt plumbing come from the
 * common virtio_pci core (vp_* in virtio_pci.h). */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};
218
/* the PCI probing function.
 * Validates the device/ABI, maps BAR 0, wires up the legacy transport
 * callbacks and registers the virtio device. Returns 0 on success or a
 * negative errno; on failure every acquired resource is unwound in
 * reverse order via the goto chain. */
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int err;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	/* Legacy devices must report the legacy ABI revision. */
	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (vp_dev == NULL)
		return -ENOMEM;

	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->vdev.config = &virtio_pci_config_ops;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* Disable MSI/MSIX to bring device to a known good state. */
	pci_msi_off(pci_dev);

	/* enable the device */
	err = pci_enable_device(pci_dev);
	if (err)
		goto out;

	err = pci_request_regions(pci_dev, "virtio-pci");
	if (err)
		goto out_enable_device;

	/* BAR 0 holds all legacy transport registers. */
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (vp_dev->ioaddr == NULL) {
		err = -ENOMEM;
		goto out_req_regions;
	}

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	pci_set_drvdata(pci_dev, vp_dev);
	pci_set_master(pci_dev);

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id. this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	/* Legacy-specific hooks used by the common virtio_pci core. */
	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	/* finally register the virtio device */
	err = register_virtio_device(&vp_dev->vdev);
	if (err)
		goto out_set_drvdata;

	return 0;

out_set_drvdata:
	pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
	pci_release_regions(pci_dev);
out_enable_device:
	pci_disable_device(pci_dev);
out:
	kfree(vp_dev);
	return err;
}
299
/* PCI remove callback: unregister the virtio device, then release the
 * transport resources in reverse order of acquisition in probe(). */
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	/* Make sure no virtqueues (and their vectors) are left behind. */
	vp_del_vqs(&vp_dev->vdev);
	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	kfree(vp_dev);
}
312
/* PCI driver glue: matches the Qumranet/Red Hat vendor ID table above
 * and defers suspend/resume to the shared virtio_pci_pm_ops. */
static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

/* Generates module init/exit that register/unregister the driver. */
module_pci_driver(virtio_pci_driver);