/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @rcu:	RCU callback item for freeing.
 * @irq:	back pointer to the parent vmd_irq_list.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct rcu_head		rcu;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of IRQs the VMD vector demuxes to.
 * @vmd:	back pointer to the owning VMD device.
 * @vmd_vector:	the h/w IRQ assigned to the VMD.
 * @index:	index into the VMD MSI-X table; used for message routing.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct vmd_dev		*vmd;
	unsigned int		vmd_vector;
	unsigned int		index;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int			msix_count;
	struct msix_entry	*msix_entries;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

#ifdef CONFIG_X86_DEV_DMA_OPS
	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
#endif
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(irq->index);
	msg->data = 0;
}
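
/*
 * Illustration (assuming the x86 <asm/msidef.h> encodings, where
 * MSI_ADDR_BASE_LO is 0xfee00000 and MSI_ADDR_DEST_ID() places its
 * argument in bits 19:12): a child IRQ assigned to VMD MSI-X table
 * entry 3 is composed as address_lo = 0xfee00000 | (3 << 12) =
 * 0xfee03000. Per the comment above, the VMD hardware treats the
 * destination ID field as an index into its own MSI-X table rather
 * than as an APIC destination.
 */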

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}
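
/*
 * Note on the scheme above: list_lock serializes the list writers, while
 * the reader (the vmd_irq() demux handler near the bottom of this file)
 * walks the list under rcu_read_lock() alone. The @enabled flag makes
 * irq_disable safe to call for an IRQ that was never enabled or was
 * already disabled, avoiding a double list_del_rcu(), and the WARN_ON()
 * flags an unexpected second enable.
 */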

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd,
					 struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (!desc->msi_attrib.is_msix || vmd->msix_count == 1)
		return &vmd->irqs[0];

	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}
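
/*
 * Selection policy, spelled out: index 0 serves plain MSI (and the
 * degenerate single-vector case), so the search above starts at 1 and
 * picks the VMD vector currently demuxing the fewest child IRQs. For
 * example, with vectors 1..3 carrying 2, 0 and 1 children, a new MSI-X
 * request lands on vector 2 and its count becomes 1.
 */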

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;

	irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
			    vmdirq, handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree_rcu(vmdirq, rcu);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

#ifdef CONFIG_X86_DEV_DMA_OPS
/*
 * VMD replaces the requester ID with its own. DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
	return get_dma_ops(to_vmd_dev(dev));
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, unsigned long attrs)
{
	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
				       attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, unsigned long attrs)
{
	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
				      attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    unsigned long attrs)
{
	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
				      size, attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   unsigned long attrs)
{
	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
					     addr, size, attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
					  dir, attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
						 dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
{
	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
}

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
}
#endif

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (get_dma_ops(&vmd->dev->dev))
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)
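
/*
 * For reference, a single invocation such as
 * ASSIGN_VMD_DMA_OPS(source, dest, map_page); expands to:
 *
 *	do {
 *		if (source->map_page)
 *			dest->map_page = vmd_map_page;
 *	} while (0);
 *
 * i.e. each vmd_* wrapper above is installed only when the underlying
 * dma_map_ops provides the corresponding callback, so the proxied ops
 * table never forwards to a callback that would be NULL.
 */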

static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
#endif
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
#else
static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
#endif

static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}
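
/*
 * The CFGBAR layout above follows the ECAM convention: 1MB (1 << 20) of
 * config space per bus and 4KB (1 << 12) per device/function. As a worked
 * example, bus 1, devfn 0x10, register 0x04 resolves to offset
 * 0x100000 + 0x10000 + 0x4 = 0x110004 from cfgbar, and the bounds check
 * rejects any access that would run past the end of the BAR.
 */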

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}
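
/*
 * Worked example: on a host whose existing PCI buses live in ACPI _SEG
 * domains 0 and 1, the scan above leaves domain at its 0xffff floor, so
 * the first VMD instance is assigned 0xffff + 1 = 0x10000; a second VMD
 * probed later would then see 0x10000 as the maximum and take 0x10001.
 */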

static int vmd_enable_domain(struct vmd_dev *vmd)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = 0,
		.end   = (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources. We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name   = "VMD MEMBAR1",
		.start  = res->start,
		.end    = res->end,
		.flags  = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name   = "VMD MEMBAR2",
		.start  = res->start + 0x2000,
		.end    = res->end,
		.flags  = flags,
		.parent = res,
	};

	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
						    x86_vector_domain);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource(&resources, &vmd->resources[1]);
	pci_add_resource(&resources, &vmd->resources[2]);
	vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
				       &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	pci_rescan_bus(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;

	rcu_read_lock();
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	rcu_read_unlock();

	return IRQ_HANDLED;
}
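
/*
 * The demux above is the RCU read side paired with the writers earlier in
 * this file: vmd_irq_enable()/vmd_irq_disable() add and remove entries
 * with list_add_tail_rcu()/list_del_rcu(), and vmd_msi_free() defers the
 * actual free via kfree_rcu(), so a concurrent walk here never touches
 * freed memory even though it takes no lock.
 */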

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	vmd->msix_entries = devm_kcalloc(&dev->dev, vmd->msix_count,
					 sizeof(*vmd->msix_entries),
					 GFP_KERNEL);
	if (!vmd->msix_entries)
		return -ENOMEM;
	for (i = 0; i < vmd->msix_count; i++)
		vmd->msix_entries[i].entry = i;

	vmd->msix_count = pci_enable_msix_range(vmd->dev, vmd->msix_entries, 1,
						vmd->msix_count);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	for (i = 0; i < vmd->msix_count; i++) {
		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		vmd->irqs[i].vmd_vector = vmd->msix_entries[i].vector;
		vmd->irqs[i].index = i;

		err = devm_request_irq(&dev->dev, vmd->irqs[i].vmd_vector,
				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	vmd_detach_resources(vmd);
	pci_set_drvdata(dev, NULL);
	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_teardown_dma_ops(vmd);
	irq_domain_remove(vmd->irq_domain);
}

#ifdef CONFIG_PM
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");