Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
3b7d1921 EB |
2 | #ifndef LINUX_MSI_H |
3 | #define LINUX_MSI_H | |
4 | ||
3ba1f050 | 5 | #include <linux/cpumask.h> |
b5f687f9 | 6 | #include <linux/mutex.h> |
4aa9bc95 | 7 | #include <linux/list.h> |
8073c1ac TG |
8 | #include <asm/msi.h> |
9 | ||
10 | /* Dummy shadow structures if an architecture does not define them */ | |
11 | #ifndef arch_msi_msg_addr_lo | |
12 | typedef struct arch_msi_msg_addr_lo { | |
13 | u32 address_lo; | |
14 | } __attribute__ ((packed)) arch_msi_msg_addr_lo_t; | |
15 | #endif | |
16 | ||
17 | #ifndef arch_msi_msg_addr_hi | |
18 | typedef struct arch_msi_msg_addr_hi { | |
19 | u32 address_hi; | |
20 | } __attribute__ ((packed)) arch_msi_msg_addr_hi_t; | |
21 | #endif | |
22 | ||
23 | #ifndef arch_msi_msg_data | |
24 | typedef struct arch_msi_msg_data { | |
25 | u32 data; | |
26 | } __attribute__ ((packed)) arch_msi_msg_data_t; | |
27 | #endif | |
4aa9bc95 | 28 | |
8073c1ac TG |
29 | /** |
30 | * msi_msg - Representation of an MSI message | |
31 | * @address_lo: Low 32 bits of msi message address | |
32 | * @arch_addrlo: Architecture specific shadow of @address_lo | |
33 | * @address_hi: High 32 bits of msi message address | |
34 | * (only used when device supports it) | |
35 | * @arch_addrhi: Architecture specific shadow of @address_hi | |
36 | * @data: MSI message data (usually 16 bits) | |
37 | * @arch_data: Architecture specific shadow of @data | |
38 | */ | |
3b7d1921 | 39 | struct msi_msg { |
8073c1ac TG |
40 | union { |
41 | u32 address_lo; | |
42 | arch_msi_msg_addr_lo_t arch_addr_lo; | |
43 | }; | |
44 | union { | |
45 | u32 address_hi; | |
46 | arch_msi_msg_addr_hi_t arch_addr_hi; | |
47 | }; | |
48 | union { | |
49 | u32 data; | |
50 | arch_msi_msg_data_t arch_data; | |
51 | }; | |
3b7d1921 EB |
52 | }; |
53 | ||
38737d82 | 54 | extern int pci_msi_ignore_mask; |
c54c1879 | 55 | /* Helper functions */ |
1c9db525 | 56 | struct irq_data; |
39431acb | 57 | struct msi_desc; |
25a98bd4 | 58 | struct pci_dev; |
c09fcc4b | 59 | struct platform_msi_priv_data; |
bf6e054e TG |
60 | struct attribute_group; |
61 | ||
2366d06e | 62 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
2f44e29c | 63 | #ifdef CONFIG_GENERIC_MSI_IRQ |
2366d06e | 64 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); |
2f44e29c AB |
65 | #else |
66 | static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | |
67 | { | |
68 | } | |
69 | #endif | |
891d4a48 | 70 | |
c09fcc4b MZ |
71 | typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, |
72 | struct msi_msg *msg); | |
73 | ||
e58f2259 TG |
74 | /** |
75 | * pci_msi_desc - PCI/MSI specific MSI descriptor data | |
76 | * | |
77 | * @msi_mask: [PCI MSI] MSI cached mask bits | |
78 | * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits | |
79 | * @is_msix: [PCI MSI/X] True if MSI-X | |
80 | * @multiple: [PCI MSI/X] log2 num of messages allocated | |
81 | * @multi_cap: [PCI MSI/X] log2 num of messages supported | |
82 | * @can_mask: [PCI MSI/X] Masking supported? | |
83 | * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit | |
e58f2259 TG |
84 | * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq |
85 | * @mask_pos: [PCI MSI] Mask register position | |
86 | * @mask_base: [PCI MSI-X] Mask register base address | |
87 | */ | |
88 | struct pci_msi_desc { | |
89 | union { | |
90 | u32 msi_mask; | |
91 | u32 msix_ctrl; | |
92 | }; | |
93 | struct { | |
94 | u8 is_msix : 1; | |
95 | u8 multiple : 3; | |
96 | u8 multi_cap : 3; | |
97 | u8 can_mask : 1; | |
98 | u8 is_64 : 1; | |
99 | u8 is_virtual : 1; | |
e58f2259 TG |
100 | unsigned default_irq; |
101 | } msi_attrib; | |
102 | union { | |
103 | u8 mask_pos; | |
104 | void __iomem *mask_base; | |
105 | }; | |
106 | }; | |
107 | ||
fc88419c JL |
108 | /** |
109 | * struct msi_desc - Descriptor structure for MSI based interrupts | |
110 | * @list: List head for management | |
111 | * @irq: The base interrupt number | |
112 | * @nvec_used: The number of vectors used | |
113 | * @dev: Pointer to the device which uses this descriptor | |
114 | * @msg: The last set MSI message cached for reuse | |
0972fa57 | 115 | * @affinity: Optional pointer to a cpu affinity mask for this descriptor |
fc88419c | 116 | * |
d7cc609f LG |
117 | * @write_msi_msg: Callback that may be called when the MSI message |
118 | * address or data changes | |
119 | * @write_msi_msg_data: Data parameter for the callback. | |
120 | * | |
20c6d424 | 121 | * @msi_index: Index of the msi descriptor |
0f180958 | 122 | * @pci: PCI specific msi descriptor data |
fc88419c | 123 | */ |
3b7d1921 | 124 | struct msi_desc { |
fc88419c JL |
125 | /* Shared device/bus type independent data */ |
126 | struct list_head list; | |
127 | unsigned int irq; | |
128 | unsigned int nvec_used; | |
129 | struct device *dev; | |
130 | struct msi_msg msg; | |
bec04037 | 131 | struct irq_affinity_desc *affinity; |
aaebdf8d JG |
132 | #ifdef CONFIG_IRQ_MSI_IOMMU |
133 | const void *iommu_cookie; | |
134 | #endif | |
3b7d1921 | 135 | |
d7cc609f LG |
136 | void (*write_msi_msg)(struct msi_desc *entry, void *data); |
137 | void *write_msi_msg_data; | |
138 | ||
20c6d424 | 139 | u16 msi_index; |
0f180958 | 140 | struct pci_msi_desc pci; |
3b7d1921 EB |
141 | }; |
142 | ||
013bd8e5 TG |
143 | /** |
144 | * msi_device_data - MSI per device data | |
145 | * @properties: MSI properties which are interesting to drivers | |
bf6e054e | 146 | * @attrs: Pointer to the sysfs attribute group |
fc22e7db | 147 | * @platform_data: Platform-MSI specific data |
125282cd | 148 | * @list: List of MSI descriptors associated to the device |
b5f687f9 | 149 | * @mutex: Mutex protecting the MSI list |
013bd8e5 TG |
150 | */ |
151 | struct msi_device_data { | |
152 | unsigned long properties; | |
bf6e054e | 153 | const struct attribute_group **attrs; |
fc22e7db | 154 | struct platform_msi_priv_data *platform_data; |
125282cd | 155 | struct list_head list; |
b5f687f9 | 156 | struct mutex mutex; |
013bd8e5 TG |
157 | }; |
158 | ||
159 | int msi_setup_device_data(struct device *dev); | |
160 | ||
cf15f43a | 161 | unsigned int msi_get_virq(struct device *dev, unsigned int index); |
b5f687f9 TG |
162 | void msi_lock_descs(struct device *dev); |
163 | void msi_unlock_descs(struct device *dev); | |
cf15f43a | 164 | |
d31eb342 | 165 | /* Helpers to hide struct msi_desc implementation details */ |
25a98bd4 | 166 | #define msi_desc_to_dev(desc) ((desc)->dev) |
125282cd | 167 | #define dev_to_msi_list(dev) (&(dev)->msi.data->list) |
d31eb342 JL |
168 | #define first_msi_entry(dev) \ |
169 | list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) | |
170 | #define for_each_msi_entry(desc, dev) \ | |
171 | list_for_each_entry((desc), dev_to_msi_list((dev)), list) | |
81b1e6e6 MR |
172 | #define for_each_msi_entry_safe(desc, tmp, dev) \ |
173 | list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) | |
4c457e8c MZ |
174 | #define for_each_msi_vector(desc, __irq, dev) \ |
175 | for_each_msi_entry((desc), (dev)) \ | |
176 | if ((desc)->irq) \ | |
177 | for (__irq = (desc)->irq; \ | |
178 | __irq < ((desc)->irq + (desc)->nvec_used); \ | |
179 | __irq++) | |
d31eb342 | 180 | |
aaebdf8d JG |
181 | #ifdef CONFIG_IRQ_MSI_IOMMU |
182 | static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) | |
183 | { | |
184 | return desc->iommu_cookie; | |
185 | } | |
186 | ||
187 | static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, | |
188 | const void *iommu_cookie) | |
189 | { | |
190 | desc->iommu_cookie = iommu_cookie; | |
191 | } | |
192 | #else | |
193 | static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) | |
194 | { | |
195 | return NULL; | |
196 | } | |
197 | ||
198 | static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, | |
199 | const void *iommu_cookie) | |
200 | { | |
201 | } | |
202 | #endif | |
203 | ||
d31eb342 JL |
204 | #ifdef CONFIG_PCI_MSI |
205 | #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) | |
206 | #define for_each_pci_msi_entry(desc, pdev) \ | |
207 | for_each_msi_entry((desc), &(pdev)->dev) | |
208 | ||
25a98bd4 | 209 | struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); |
2f44e29c | 210 | void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); |
c179c9b9 | 211 | #else /* CONFIG_PCI_MSI */ |
2f44e29c AB |
212 | static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) |
213 | { | |
214 | } | |
d31eb342 JL |
215 | #endif /* CONFIG_PCI_MSI */ |
216 | ||
28f4b041 | 217 | struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, |
bec04037 | 218 | const struct irq_affinity_desc *affinity); |
aa48b6f7 | 219 | void free_msi_entry(struct msi_desc *entry); |
891d4a48 | 220 | void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
83a18912 | 221 | void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
83a18912 | 222 | |
23ed8d57 TG |
223 | void pci_msi_mask_irq(struct irq_data *data); |
224 | void pci_msi_unmask_irq(struct irq_data *data); | |
225 | ||
1197528a | 226 | #ifdef CONFIG_SYSFS |
013bd8e5 TG |
227 | int msi_device_populate_sysfs(struct device *dev); |
228 | void msi_device_destroy_sysfs(struct device *dev); | |
24cff375 | 229 | #else /* CONFIG_SYSFS */ |
013bd8e5 TG |
230 | static inline int msi_device_populate_sysfs(struct device *dev) { return 0; } |
231 | static inline void msi_device_destroy_sysfs(struct device *dev) { } | |
24cff375 | 232 | #endif /* !CONFIG_SYSFS */ |
2f170814 | 233 | |
3b7d1921 | 234 | /* |
077ee78e TG |
235 | * The arch hooks to set up MSI irqs. Default functions are implemented |
236 | * as weak symbols so that they /can/ be overridden by architecture specific |
b227be0d | 237 | * code if needed. These hooks can only be enabled by the architecture. |
077ee78e TG |
238 | * |
239 | * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by | |
240 | * stubs with warnings. | |
3b7d1921 | 241 | */ |
077ee78e | 242 | #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS |
f7feaca7 | 243 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); |
3b7d1921 | 244 | void arch_teardown_msi_irq(unsigned int irq); |
2366d06e BH |
245 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
246 | void arch_teardown_msi_irqs(struct pci_dev *dev); | |
24cff375 | 247 | #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ |
077ee78e TG |
248 | |
249 | /* | |
ae72f315 TG |
250 | * The restore hook is still available even for fully irq domain based |
251 | * setups. Courtesy to XEN/X86. | |
077ee78e | 252 | */ |
ae72f315 | 253 | bool arch_restore_msi_irqs(struct pci_dev *dev); |
3b7d1921 | 254 | |
f3cf8bb0 | 255 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN |
d9109698 | 256 | |
aeeb5965 | 257 | #include <linux/irqhandler.h> |
d9109698 | 258 | |
f3cf8bb0 | 259 | struct irq_domain; |
552c494a | 260 | struct irq_domain_ops; |
f3cf8bb0 JL |
261 | struct irq_chip; |
262 | struct device_node; | |
be5436c8 | 263 | struct fwnode_handle; |
f3cf8bb0 JL |
264 | struct msi_domain_info; |
265 | ||
266 | /** | |
267 | * struct msi_domain_ops - MSI interrupt domain callbacks | |
268 | * @get_hwirq: Retrieve the resulting hw irq number | |
269 | * @msi_init: Domain specific init function for MSI interrupts | |
270 | * @msi_free: Domain specific function to free MSI interrupts | |
d9109698 JL |
271 | * @msi_check: Callback for verification of the domain/info/dev data |
272 | * @msi_prepare: Prepare the allocation of the interrupts in the domain | |
d9109698 | 273 | * @set_desc: Set the msi descriptor for an interrupt |
43e9e705 TG |
274 | * @domain_alloc_irqs: Optional function to override the default allocation |
275 | * function. | |
276 | * @domain_free_irqs: Optional function to override the default free | |
277 | * function. | |
d9109698 | 278 | * |
1dd2c6a0 TG |
279 | * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying |
280 | * irqdomain. | |
d9109698 | 281 | * |
89033762 | 282 | * @msi_check, @msi_prepare and @set_desc are callbacks used by |
1dd2c6a0 | 283 | * msi_domain_alloc/free_irqs(). |
43e9e705 TG |
284 | * |
285 | * @domain_alloc_irqs, @domain_free_irqs can be used to override the | |
286 | * default allocation/free functions (__msi_domain_alloc/free_irqs). This | |
287 | * is initially for a wrapper around XEN's separate MSI universe which can't | |
288 | * be wrapped into the regular irq domains concepts by mere mortals. This | |
289 | * allows to universally use msi_domain_alloc/free_irqs without having to | |
290 | * special case XEN all over the place. | |
291 | * | |
292 | * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs | |
293 | * are set to the default implementation if NULL and even when | |
294 | * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and | |
295 | * because these callbacks are obviously mandatory. | |
296 | * | |
297 | * This is NOT meant to be abused, but it can be useful to build wrappers | |
298 | * for specialized MSI irq domains which need extra work before and after | |
299 | * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs(). | |
f3cf8bb0 JL |
300 | */ |
301 | struct msi_domain_ops { | |
aeeb5965 JL |
302 | irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, |
303 | msi_alloc_info_t *arg); | |
f3cf8bb0 JL |
304 | int (*msi_init)(struct irq_domain *domain, |
305 | struct msi_domain_info *info, | |
306 | unsigned int virq, irq_hw_number_t hwirq, | |
aeeb5965 | 307 | msi_alloc_info_t *arg); |
f3cf8bb0 JL |
308 | void (*msi_free)(struct irq_domain *domain, |
309 | struct msi_domain_info *info, | |
310 | unsigned int virq); | |
d9109698 JL |
311 | int (*msi_check)(struct irq_domain *domain, |
312 | struct msi_domain_info *info, | |
313 | struct device *dev); | |
314 | int (*msi_prepare)(struct irq_domain *domain, | |
315 | struct device *dev, int nvec, | |
316 | msi_alloc_info_t *arg); | |
d9109698 JL |
317 | void (*set_desc)(msi_alloc_info_t *arg, |
318 | struct msi_desc *desc); | |
43e9e705 TG |
319 | int (*domain_alloc_irqs)(struct irq_domain *domain, |
320 | struct device *dev, int nvec); | |
321 | void (*domain_free_irqs)(struct irq_domain *domain, | |
322 | struct device *dev); | |
f3cf8bb0 JL |
323 | }; |
324 | ||
325 | /** | |
326 | * struct msi_domain_info - MSI interrupt domain data | |
aeeb5965 JL |
327 | * @flags: Flags to describe features and capabilities |
328 | * @ops: The callback data structure | |
329 | * @chip: Optional: associated interrupt chip | |
330 | * @chip_data: Optional: associated interrupt chip data | |
331 | * @handler: Optional: associated interrupt flow handler | |
332 | * @handler_data: Optional: associated interrupt flow handler data | |
333 | * @handler_name: Optional: associated interrupt flow handler name | |
334 | * @data: Optional: domain specific data | |
f3cf8bb0 JL |
335 | */ |
336 | struct msi_domain_info { | |
aeeb5965 | 337 | u32 flags; |
f3cf8bb0 JL |
338 | struct msi_domain_ops *ops; |
339 | struct irq_chip *chip; | |
aeeb5965 JL |
340 | void *chip_data; |
341 | irq_flow_handler_t handler; | |
342 | void *handler_data; | |
343 | const char *handler_name; | |
f3cf8bb0 JL |
344 | void *data; |
345 | }; | |
346 | ||
aeeb5965 JL |
347 | /* Flags for msi_domain_info */ |
348 | enum { | |
349 | /* | |
350 | * Init non implemented ops callbacks with default MSI domain | |
351 | * callbacks. | |
352 | */ | |
353 | MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), | |
354 | /* | |
355 | * Init non implemented chip callbacks with default MSI chip | |
356 | * callbacks. | |
357 | */ | |
358 | MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), | |
aeeb5965 | 359 | /* Support multiple PCI MSI interrupts */ |
b6140914 | 360 | MSI_FLAG_MULTI_PCI_MSI = (1 << 2), |
aeeb5965 | 361 | /* Support PCI MSIX interrupts */ |
b6140914 | 362 | MSI_FLAG_PCI_MSIX = (1 << 3), |
f3b0946d MZ |
363 | /* Needs early activate, required for PCI */ |
364 | MSI_FLAG_ACTIVATE_EARLY = (1 << 4), | |
22d0b12f TG |
365 | /* |
366 | * Must reactivate when irq is started even when | |
367 | * MSI_FLAG_ACTIVATE_EARLY has been set. | |
368 | */ | |
369 | MSI_FLAG_MUST_REACTIVATE = (1 << 5), | |
0be8153c MZ |
370 | /* Is level-triggered capable, using two messages */ |
371 | MSI_FLAG_LEVEL_CAPABLE = (1 << 6), | |
013bd8e5 TG |
372 | /* Populate sysfs on alloc() and destroy it on free() */ |
373 | MSI_FLAG_DEV_SYSFS = (1 << 7), | |
7a823443 TG |
374 | /* MSI-X entries must be contiguous */ |
375 | MSI_FLAG_MSIX_CONTIGUOUS = (1 << 8), | |
aeeb5965 JL |
376 | }; |
377 | ||
f3cf8bb0 JL |
378 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, |
379 | bool force); | |
380 | ||
be5436c8 | 381 | struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, |
f3cf8bb0 JL |
382 | struct msi_domain_info *info, |
383 | struct irq_domain *parent); | |
43e9e705 TG |
384 | int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
385 | int nvec); | |
0f62d941 TG |
386 | int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev, |
387 | int nvec); | |
d9109698 JL |
388 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
389 | int nvec); | |
43e9e705 | 390 | void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
0f62d941 | 391 | void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev); |
d9109698 | 392 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
f3cf8bb0 JL |
393 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); |
394 | ||
be5436c8 | 395 | struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, |
c09fcc4b MZ |
396 | struct msi_domain_info *info, |
397 | struct irq_domain *parent); | |
398 | int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, | |
399 | irq_write_msi_msg_t write_msi_msg); | |
400 | void platform_msi_domain_free_irqs(struct device *dev); | |
b2eba39b MZ |
401 | |
402 | /* When an MSI domain is used as an intermediate domain */ | |
403 | int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, | |
404 | int nvec, msi_alloc_info_t *args); | |
2145ac93 MZ |
405 | int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, |
406 | int virq, int nvec, msi_alloc_info_t *args); | |
552c494a | 407 | struct irq_domain * |
1f83515b MZ |
408 | __platform_msi_create_device_domain(struct device *dev, |
409 | unsigned int nvec, | |
410 | bool is_tree, | |
411 | irq_write_msi_msg_t write_msi_msg, | |
412 | const struct irq_domain_ops *ops, | |
413 | void *host_data); | |
414 | ||
415 | #define platform_msi_create_device_domain(dev, nvec, write, ops, data) \ | |
416 | __platform_msi_create_device_domain(dev, nvec, false, write, ops, data) | |
417 | #define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \ | |
418 | __platform_msi_create_device_domain(dev, nvec, true, write, ops, data) | |
419 | ||
9835cec6 TG |
420 | int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq, |
421 | unsigned int nr_irqs); | |
422 | void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq, | |
423 | unsigned int nvec); | |
552c494a | 424 | void *platform_msi_get_host_data(struct irq_domain *domain); |
f3cf8bb0 JL |
425 | #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ |
426 | ||
3878eaef | 427 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN |
be5436c8 | 428 | struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, |
3878eaef JL |
429 | struct msi_domain_info *info, |
430 | struct irq_domain *parent); | |
b6eec9b7 | 431 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); |
54fa97ee | 432 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); |
2fd60266 | 433 | bool pci_dev_has_special_msi_domain(struct pci_dev *pdev); |
54fa97ee MZ |
434 | #else |
435 | static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) | |
436 | { | |
437 | return NULL; | |
438 | } | |
3878eaef JL |
439 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ |
440 | ||
3b7d1921 | 441 | #endif /* LINUX_MSI_H */ |