xen/pciback: Use mutexes when working with Xenbus state transitions.
drivers/xen/xen-pciback/vpci.c
/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"

#define PCI_SLOT_MAX 32

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	spinlock_t lock;
};

static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}

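/*
 * Look up the real struct pci_dev behind a (domain, bus, devfn) address on
 * the virtual bus.  The virtual bus only ever exposes domain 0, bus 0; the
 * slot indexes dev_list[] and the function number is matched within that
 * slot's list.
 */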
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		spin_lock_irqsave(&vpci_dev->lock, flags);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		spin_unlock_irqrestore(&vpci_dev->lock, flags);
	}
	return dev;
}

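/*
 * Two devices "match" when they share the same physical domain, bus and
 * slot, i.e. they are functions of one multi-function device and should be
 * grouped on a single virtual slot.
 */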
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}

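/*
 * Add a real device to the virtual bus: bridges are rejected, additional
 * functions of an already-exported device are placed in that device's
 * virtual slot, and anything else takes the first free slot.  On success
 * the resulting virtual slot/function is handed to publish_cb so the
 * frontend can see it.
 */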
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	spin_lock_irqsave(&vpci_dev->lock, flags);

	/* Keep multi-function devices together on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (!list_empty(&vpci_dev->dev_list[slot])) {
			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				pr_info(DRV_NAME ": vpci: %s: "
					"assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			pr_info(DRV_NAME
				": vpci: %s: assign to virtual slot %d\n",
				pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			func = PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	spin_unlock_irqrestore(&vpci_dev->lock, flags);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);

out:
	return err;
}

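/*
 * Remove a device from the virtual bus and hand it back to pcistub.
 */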
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&vpci_dev->lock, flags);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	spin_unlock_irqrestore(&vpci_dev->lock, flags);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}

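/*
 * Allocate and initialize the per-backend slot lists and the lock that
 * protects them.
 */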
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	spin_lock_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}

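/*
 * Report the single root of the virtual bus (domain 0, bus 0) via the
 * supplied publish callback.
 */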
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}

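/*
 * Tear down the whole virtual bus: return every exported device to pcistub
 * and free the backend's bookkeeping.
 */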
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			list_del(&e->list);
			pcistub_put_pci_dev(e->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}

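/*
 * Reverse lookup: given a real device, report the (domain, bus, devfn)
 * under which the frontend sees it.  Returns 1 if the device is on the
 * virtual bus, 0 otherwise.
 */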
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;
	int found = 0, slot;

	spin_lock_irqsave(&vpci_dev->lock, flags);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[slot],
				    list) {
			dev = entry->dev;
			if (dev && dev->bus->number == pcidev->bus->number
			    && pci_domain_nr(dev->bus) ==
				pci_domain_nr(pcidev->bus)
			    && dev->devfn == pcidev->devfn) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
					 PCI_FUNC(pcidev->devfn));
			}
		}
	}
	spin_unlock_irqrestore(&vpci_dev->lock, flags);
	return found;
}

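/*
 * Backend operations plugged into xen-pciback's generic backend interface
 * when the "vpci" (virtual PCI bus) mode is selected.
 */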
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};