/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @rcu:	RCU callback item for freeing.
 * @irq:	back pointer to parent.
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct rcu_head		rcu;
	struct vmd_irq_list	*irq;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of IRQs the VMD one demuxes to.
 * @vmd:	back pointer to the owning VMD device.
 * @vmd_vector:	the h/w IRQ assigned to the VMD.
 * @index:	index into the VMD MSI-X table; used for message routing.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct vmd_dev		*vmd;
	unsigned int		vmd_vector;
	unsigned int		index;
	unsigned int		count;
};

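/**
 * struct vmd_dev - private data for the VMD PCIe endpoint
 * @dev:	the VMD PCIe endpoint itself.
 * @cfg_lock:	serializes child config space access; see vmd_pci_read().
 * @cfgbar:	kernel mapping of the VMD CFGBAR.
 * @msix_count:	number of MSI-X vectors enabled on the VMD.
 * @msix_entries: entries used to enable the VMD's MSI-X vectors.
 * @irqs:	one vmd_irq_list per enabled MSI-X vector.
 * @sysdata:	x86 sysdata carrying the new PCI domain number.
 * @resources:	bus-number and memory windows carved from the VMD BARs.
 * @irq_domain:	MSI irq_domain for devices behind the VMD.
 * @bus:	root bus of the VMD-owned PCI domain.
 * @dma_ops:	DMA operations proxied to the VMD; see vmd_setup_dma_ops().
 * @dma_domain:	registration tying @dma_ops to @sysdata's domain number.
 */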
struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int			msix_count;
	struct msix_entry	*msix_entries;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

#ifdef CONFIG_X86_DEV_DMA_OPS
	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
#endif
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(irq->index);
	msg->data = 0;
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;

	raw_spin_lock(&list_lock);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	raw_spin_unlock(&list_lock);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;

	data->chip->irq_mask(data);

	raw_spin_lock(&list_lock);
	list_del_rcu(&vmdirq->node);
	raw_spin_unlock(&list_lock);
}

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

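/*
 * The hwirq is not used to distinguish VMD child IRQs: which VMD vector a
 * child IRQ lands on is decided later, in vmd_msi_init().
 */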
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd)
{
	int i, best = 0;

	raw_spin_lock(&list_lock);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock(&list_lock);

	return &vmd->irqs[best];
}

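/*
 * Allocate the per-IRQ mapping, attach the child IRQ to the least-loaded
 * VMD vector, and wire the virq up with the VMD MSI irq_chip.
 */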
static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd);
	vmdirq->virq = virq;

	irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
			    vmdirq, handle_simple_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock(&list_lock);
	vmdirq->irq->count--;
	raw_spin_unlock(&list_lock);

	kfree_rcu(vmdirq, rcu);
}

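/*
 * If a device asks for more vectors than the VMD has, return the VMD's
 * count so the allocation is retried with at most that many vectors.
 */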
static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

#ifdef CONFIG_X86_DEV_DMA_OPS
/*
 * VMD replaces the requester ID with its own. DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
	return to_vmd_dev(dev)->archdata.dma_ops;
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
				       attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
				      attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
				      size, attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
					     addr, size, attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
					  dir, attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_attrs *attrs)
{
	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
						 dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
{
	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
}

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
}
#endif

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (vmd->dev->dev.archdata.dma_ops)
		del_dma_domain(domain);
}

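/*
 * Mirror only the DMA ops the arch actually provides, so a proxied op is
 * never installed where the underlying op is NULL.
 */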
#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops;
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
#endif
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
#else
static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
#endif

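/*
 * CFGBAR is laid out ECAM-style: 1MB of config space per bus and 4K per
 * function, so the offset is (bus << 20) + (devfn << 12) + reg, bounds-
 * checked against the size of the BAR.
 */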
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

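/*
 * The root bus memory windows handed out in vmd_enable_domain() live inside
 * the VMD's own MEMBARs; linking them in as resource children keeps the
 * resource tree (e.g. /proc/iomem) consistent with that layout.
 */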
static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

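/*
 * Bring up the VMD-owned PCI domain: carve bus-number and memory windows out
 * of the VMD BARs, pick a free domain number, create the MSI irq_domain, and
 * finally create and scan the new root bus.
 */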
static int vmd_enable_domain(struct vmd_dev *vmd)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = 0,
		.end   = (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources. We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name   = "VMD MEMBAR1",
		.start  = res->start,
		.end    = res->end,
		.flags  = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
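	/*
	 * The first 8K of MEMBAR2 is skipped and never handed out to child
	 * devices; it is assumed reserved for the VMD itself.
	 */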
	vmd->resources[2] = (struct resource) {
		.name   = "VMD MEMBAR2",
		.start  = res->start + 0x2000,
		.end    = res->end,
		.flags  = flags,
		.parent = res,
	};

	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
						    NULL);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource(&resources, &vmd->resources[1]);
	pci_add_resource(&resources, &vmd->resources[2]);
	vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
				       &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	pci_rescan_bus(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

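/*
 * Demux a VMD MSI-X vector: walk the RCU-protected list maintained by
 * vmd_irq_enable()/vmd_irq_disable() and invoke every child IRQ sharing
 * this vector.
 */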
static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;

	rcu_read_lock();
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	rcu_read_unlock();

	return IRQ_HANDLED;
}

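/*
 * Probe: require and map a CFGBAR of at least 1MB (one bus worth of config
 * space), enable as many MSI-X vectors as the VMD provides with one demux
 * handler each, then enable the new PCI domain.
 */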
static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	vmd->msix_entries = devm_kcalloc(&dev->dev, vmd->msix_count,
					 sizeof(*vmd->msix_entries),
					 GFP_KERNEL);
	if (!vmd->msix_entries)
		return -ENOMEM;
	for (i = 0; i < vmd->msix_count; i++)
		vmd->msix_entries[i].entry = i;

	vmd->msix_count = pci_enable_msix_range(vmd->dev, vmd->msix_entries, 1,
						vmd->msix_count);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	for (i = 0; i < vmd->msix_count; i++) {
		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		vmd->irqs[i].vmd_vector = vmd->msix_entries[i].vector;
		vmd->irqs[i].index = i;

		err = devm_request_irq(&dev->dev, vmd->irqs[i].vmd_vector,
				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

704 | ||
705 | static void vmd_remove(struct pci_dev *dev) | |
706 | { | |
707 | struct vmd_dev *vmd = pci_get_drvdata(dev); | |
708 | ||
2c2c5c5c | 709 | vmd_detach_resources(vmd); |
185a383a KB |
710 | pci_set_drvdata(dev, NULL); |
711 | sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); | |
712 | pci_stop_root_bus(vmd->bus); | |
713 | pci_remove_root_bus(vmd->bus); | |
714 | vmd_teardown_dma_ops(vmd); | |
715 | irq_domain_remove(vmd->irq_domain); | |
716 | } | |
717 | ||
#ifdef CONFIG_PM
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");