Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
3b7d1921 EB |
2 | #ifndef LINUX_MSI_H |
3 | #define LINUX_MSI_H | |
4 | ||
3ba1f050 | 5 | #include <linux/cpumask.h> |
b5f687f9 | 6 | #include <linux/mutex.h> |
4aa9bc95 | 7 | #include <linux/list.h> |
8073c1ac TG |
8 | #include <asm/msi.h> |
9 | ||
10 | /* Dummy shadow structures if an architecture does not define them */ | |
11 | #ifndef arch_msi_msg_addr_lo | |
12 | typedef struct arch_msi_msg_addr_lo { | |
13 | u32 address_lo; | |
14 | } __attribute__ ((packed)) arch_msi_msg_addr_lo_t; | |
15 | #endif | |
16 | ||
17 | #ifndef arch_msi_msg_addr_hi | |
18 | typedef struct arch_msi_msg_addr_hi { | |
19 | u32 address_hi; | |
20 | } __attribute__ ((packed)) arch_msi_msg_addr_hi_t; | |
21 | #endif | |
22 | ||
23 | #ifndef arch_msi_msg_data | |
24 | typedef struct arch_msi_msg_data { | |
25 | u32 data; | |
26 | } __attribute__ ((packed)) arch_msi_msg_data_t; | |
27 | #endif | |
4aa9bc95 | 28 | |
8073c1ac TG |
29 | /** |
30 | * msi_msg - Representation of an MSI message | |
31 | * @address_lo: Low 32 bits of msi message address | |
32 | * @arch_addr_lo: Architecture specific shadow of @address_lo | |
33 | * @address_hi: High 32 bits of msi message address | |
34 | * (only used when device supports it) | |
35 | * @arch_addr_hi: Architecture specific shadow of @address_hi | |
36 | * @data: MSI message data (usually 16 bits) | |
37 | * @arch_data: Architecture specific shadow of @data | |
38 | */ | |
3b7d1921 | 39 | struct msi_msg { |
8073c1ac TG |
40 | union { |
41 | u32 address_lo; | |
42 | arch_msi_msg_addr_lo_t arch_addr_lo; | |
43 | }; | |
44 | union { | |
45 | u32 address_hi; | |
46 | arch_msi_msg_addr_hi_t arch_addr_hi; | |
47 | }; | |
48 | union { | |
49 | u32 data; | |
50 | arch_msi_msg_data_t arch_data; | |
51 | }; | |
3b7d1921 EB |
52 | }; |
53 | ||
38737d82 | 54 | extern int pci_msi_ignore_mask; |
c54c1879 | 55 | /* Helper functions */ |
1c9db525 | 56 | struct irq_data; |
39431acb | 57 | struct msi_desc; |
25a98bd4 | 58 | struct pci_dev; |
c09fcc4b | 59 | struct platform_msi_priv_data; |
bf6e054e TG |
60 | struct attribute_group; |
61 | ||
2366d06e | 62 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
2f44e29c | 63 | #ifdef CONFIG_GENERIC_MSI_IRQ |
2366d06e | 64 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); |
2f44e29c AB |
65 | #else |
66 | static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | |
67 | { | |
68 | } | |
69 | #endif | |
891d4a48 | 70 | |
c09fcc4b MZ |
71 | typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, |
72 | struct msi_msg *msg); | |
73 | ||
e58f2259 TG |
74 | /** |
75 | * pci_msi_desc - PCI/MSI specific MSI descriptor data | |
76 | * | |
77 | * @msi_mask: [PCI MSI] MSI cached mask bits | |
78 | * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits | |
79 | * @is_msix: [PCI MSI/X] True if MSI-X | |
80 | * @multiple: [PCI MSI/X] log2 num of messages allocated | |
81 | * @multi_cap: [PCI MSI/X] log2 num of messages supported | |
82 | * @can_mask: [PCI MSI/X] Masking supported? | |
83 | * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit | |
| * @is_virtual: [PCI MSI/X] True when the vector is beyond the MSI-X table | |
e58f2259 TG |
84 | * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq |
85 | * @mask_pos: [PCI MSI] Mask register position | |
86 | * @mask_base: [PCI MSI-X] Mask register base address | |
87 | */ | |
88 | struct pci_msi_desc { | |
89 | union { | |
90 | u32 msi_mask; | |
91 | u32 msix_ctrl; | |
92 | }; | |
93 | struct { | |
94 | u8 is_msix : 1; | |
95 | u8 multiple : 3; | |
96 | u8 multi_cap : 3; | |
97 | u8 can_mask : 1; | |
98 | u8 is_64 : 1; | |
99 | u8 is_virtual : 1; | |
e58f2259 TG |
100 | unsigned default_irq; |
101 | } msi_attrib; | |
102 | union { | |
103 | u8 mask_pos; | |
104 | void __iomem *mask_base; | |
105 | }; | |
106 | }; | |
107 | ||
fc88419c JL |
108 | /** |
109 | * struct msi_desc - Descriptor structure for MSI based interrupts | |
110 | * @list: List head for management | |
111 | * @irq: The base interrupt number | |
112 | * @nvec_used: The number of vectors used | |
113 | * @dev: Pointer to the device which uses this descriptor | |
114 | * @msg: The last set MSI message cached for reuse | |
0972fa57 | 115 | * @affinity: Optional pointer to a cpu affinity mask for this descriptor |
fc88419c | 116 | * |
d7cc609f LG |
117 | * @write_msi_msg: Callback that may be called when the MSI message |
118 | * address or data changes | |
119 | * @write_msi_msg_data: Data parameter for the callback. | |
120 | * | |
20c6d424 | 121 | * @msi_index: Index of the msi descriptor |
0f180958 | 122 | * @pci: PCI specific msi descriptor data |
fc88419c | 123 | */ |
3b7d1921 | 124 | struct msi_desc { |
fc88419c JL |
125 | /* Shared device/bus type independent data */ |
126 | struct list_head list; | |
127 | unsigned int irq; | |
128 | unsigned int nvec_used; | |
129 | struct device *dev; | |
130 | struct msi_msg msg; | |
bec04037 | 131 | struct irq_affinity_desc *affinity; |
aaebdf8d JG |
132 | #ifdef CONFIG_IRQ_MSI_IOMMU |
133 | const void *iommu_cookie; | |
134 | #endif | |
3b7d1921 | 135 | |
d7cc609f LG |
136 | void (*write_msi_msg)(struct msi_desc *entry, void *data); |
137 | void *write_msi_msg_data; | |
138 | ||
20c6d424 | 139 | u16 msi_index; |
0f180958 | 140 | struct pci_msi_desc pci; |
3b7d1921 EB |
141 | }; |
142 | ||
1046f71d TG |
143 | /* |
144 | * Filter values for the MSI descriptor iterators and accessor functions. | |
145 | */ | |
146 | enum msi_desc_filter { | |
147 | /* All descriptors */ | |
148 | MSI_DESC_ALL, | |
149 | /* Descriptors which have no interrupt associated */ | |
150 | MSI_DESC_NOTASSOCIATED, | |
151 | /* Descriptors which have an interrupt associated */ | |
152 | MSI_DESC_ASSOCIATED, | |
153 | }; | |
154 | ||
013bd8e5 TG |
155 | /** |
156 | * msi_device_data - MSI per device data | |
157 | * @properties: MSI properties which are interesting to drivers | |
bf6e054e | 158 | * @attrs: Pointer to the sysfs attribute group |
fc22e7db | 159 | * @platform_data: Platform-MSI specific data |
125282cd | 160 | * @list: List of MSI descriptors associated to the device |
b5f687f9 | 161 | * @mutex: Mutex protecting the MSI list |
1046f71d | 162 | * @__next: Cached pointer to the next entry for iterators |
013bd8e5 TG |
163 | */ |
164 | struct msi_device_data { | |
165 | unsigned long properties; | |
bf6e054e | 166 | const struct attribute_group **attrs; |
fc22e7db | 167 | struct platform_msi_priv_data *platform_data; |
125282cd | 168 | struct list_head list; |
b5f687f9 | 169 | struct mutex mutex; |
1046f71d | 170 | struct msi_desc *__next; |
013bd8e5 TG |
171 | }; |
172 | ||
173 | int msi_setup_device_data(struct device *dev); | |
174 | ||
cf15f43a | 175 | unsigned int msi_get_virq(struct device *dev, unsigned int index); |
b5f687f9 TG |
176 | void msi_lock_descs(struct device *dev); |
177 | void msi_unlock_descs(struct device *dev); | |
cf15f43a | 178 | |
1046f71d TG |
179 | struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter); |
180 | struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter); | |
181 | ||
182 | /** | |
183 | * msi_for_each_desc - Iterate the MSI descriptors | |
184 | * | |
185 | * @desc: struct msi_desc pointer used as iterator | |
186 | * @dev: struct device pointer - device to iterate | |
187 | * @filter: Filter for descriptor selection | |
188 | * | |
189 | * Notes: | |
190 | * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs() | |
191 | * pair. | |
192 | * - It is safe to remove a retrieved MSI descriptor in the loop. | |
193 | */ | |
194 | #define msi_for_each_desc(desc, dev, filter) \ | |
195 | for ((desc) = msi_first_desc((dev), (filter)); (desc); \ | |
196 | (desc) = msi_next_desc((dev), (filter))) | |
197 | ||
d31eb342 | 198 | /* Helpers to hide struct msi_desc implementation details */ |
25a98bd4 | 199 | #define msi_desc_to_dev(desc) ((desc)->dev) |
125282cd | 200 | #define dev_to_msi_list(dev) (&(dev)->msi.data->list) |
d31eb342 JL |
201 | #define first_msi_entry(dev) \ |
202 | list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) | |
203 | #define for_each_msi_entry(desc, dev) \ | |
204 | list_for_each_entry((desc), dev_to_msi_list((dev)), list) | |
81b1e6e6 MR |
205 | #define for_each_msi_entry_safe(desc, tmp, dev) \ |
206 | list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) | |
4c457e8c MZ |
207 | #define for_each_msi_vector(desc, __irq, dev) \ |
208 | for_each_msi_entry((desc), (dev)) \ | |
209 | if ((desc)->irq) \ | |
210 | for (__irq = (desc)->irq; \ | |
211 | __irq < ((desc)->irq + (desc)->nvec_used); \ | |
212 | __irq++) | |
d31eb342 | 213 | |
aaebdf8d JG |
214 | #ifdef CONFIG_IRQ_MSI_IOMMU |
215 | static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) | |
216 | { | |
217 | return desc->iommu_cookie; | |
218 | } | |
219 | ||
220 | static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, | |
221 | const void *iommu_cookie) | |
222 | { | |
223 | desc->iommu_cookie = iommu_cookie; | |
224 | } | |
225 | #else | |
226 | static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) | |
227 | { | |
228 | return NULL; | |
229 | } | |
230 | ||
231 | static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, | |
232 | const void *iommu_cookie) | |
233 | { | |
234 | } | |
235 | #endif | |
236 | ||
d31eb342 JL |
237 | #ifdef CONFIG_PCI_MSI |
238 | #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) | |
239 | #define for_each_pci_msi_entry(desc, pdev) \ | |
240 | for_each_msi_entry((desc), &(pdev)->dev) | |
241 | ||
25a98bd4 | 242 | struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); |
2f44e29c | 243 | void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); |
c179c9b9 | 244 | #else /* CONFIG_PCI_MSI */ |
2f44e29c AB |
245 | static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) |
246 | { | |
247 | } | |
d31eb342 JL |
248 | #endif /* CONFIG_PCI_MSI */ |
249 | ||
60290525 TG |
250 | int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc); |
251 | ||
28f4b041 | 252 | struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, |
bec04037 | 253 | const struct irq_affinity_desc *affinity); |
aa48b6f7 | 254 | void free_msi_entry(struct msi_desc *entry); |
891d4a48 | 255 | void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
83a18912 | 256 | void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
83a18912 | 257 | |
23ed8d57 TG |
258 | void pci_msi_mask_irq(struct irq_data *data); |
259 | void pci_msi_unmask_irq(struct irq_data *data); | |
260 | ||
1197528a | 261 | #ifdef CONFIG_SYSFS |
013bd8e5 TG |
262 | int msi_device_populate_sysfs(struct device *dev); |
263 | void msi_device_destroy_sysfs(struct device *dev); | |
24cff375 | 264 | #else /* CONFIG_SYSFS */ |
013bd8e5 TG |
265 | static inline int msi_device_populate_sysfs(struct device *dev) { return 0; } |
266 | static inline void msi_device_destroy_sysfs(struct device *dev) { } | |
24cff375 | 267 | #endif /* !CONFIG_SYSFS */ |
2f170814 | 268 | |
3b7d1921 | 269 | /* |
077ee78e TG |
270 | * The arch hooks to setup up msi irqs. Default functions are implemented |
271 | * as weak symbols so that they /can/ be overridden by architecture specific |
b227be0d | 272 | * code if needed. These hooks can only be enabled by the architecture. |
077ee78e TG |
273 | * |
274 | * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by | |
275 | * stubs with warnings. | |
3b7d1921 | 276 | */ |
077ee78e | 277 | #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS |
f7feaca7 | 278 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); |
3b7d1921 | 279 | void arch_teardown_msi_irq(unsigned int irq); |
2366d06e BH |
280 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
281 | void arch_teardown_msi_irqs(struct pci_dev *dev); | |
24cff375 | 282 | #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ |
077ee78e TG |
283 | |
284 | /* | |
ae72f315 TG |
285 | * The restore hook is still available even for fully irq domain based |
286 | * setups. Courtesy to XEN/X86. | |
077ee78e | 287 | */ |
ae72f315 | 288 | bool arch_restore_msi_irqs(struct pci_dev *dev); |
3b7d1921 | 289 | |
f3cf8bb0 | 290 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN |
d9109698 | 291 | |
aeeb5965 | 292 | #include <linux/irqhandler.h> |
d9109698 | 293 | |
f3cf8bb0 | 294 | struct irq_domain; |
552c494a | 295 | struct irq_domain_ops; |
f3cf8bb0 JL |
296 | struct irq_chip; |
297 | struct device_node; | |
be5436c8 | 298 | struct fwnode_handle; |
f3cf8bb0 JL |
299 | struct msi_domain_info; |
300 | ||
301 | /** | |
302 | * struct msi_domain_ops - MSI interrupt domain callbacks | |
303 | * @get_hwirq: Retrieve the resulting hw irq number | |
304 | * @msi_init: Domain specific init function for MSI interrupts | |
305 | * @msi_free: Domain specific function to free MSI interrupts | |
d9109698 JL |
306 | * @msi_check: Callback for verification of the domain/info/dev data |
307 | * @msi_prepare: Prepare the allocation of the interrupts in the domain | |
d9109698 | 308 | * @set_desc: Set the msi descriptor for an interrupt |
43e9e705 TG |
309 | * @domain_alloc_irqs: Optional function to override the default allocation |
310 | * function. | |
311 | * @domain_free_irqs: Optional function to override the default free | |
312 | * function. | |
d9109698 | 313 | * |
1dd2c6a0 TG |
314 | * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying |
315 | * irqdomain. | |
d9109698 | 316 | * |
89033762 | 317 | * @msi_check, @msi_prepare and @set_desc are callbacks used by |
1dd2c6a0 | 318 | * msi_domain_alloc/free_irqs(). |
43e9e705 TG |
319 | * |
320 | * @domain_alloc_irqs, @domain_free_irqs can be used to override the | |
321 | * default allocation/free functions (__msi_domain_alloc/free_irqs). This | |
322 | * is initially for a wrapper around XEN's separate MSI universe which can't | |
323 | * be wrapped into the regular irq domains concepts by mere mortals. This | |
324 | * allows to universally use msi_domain_alloc/free_irqs without having to | |
325 | * special case XEN all over the place. | |
326 | * | |
327 | * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs | |
328 | * are set to the default implementation if NULL and even when | |
329 | * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and | |
330 | * because these callbacks are obviously mandatory. | |
331 | * | |
332 | * This is NOT meant to be abused, but it can be useful to build wrappers | |
333 | * for specialized MSI irq domains which need extra work before and after | |
334 | * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs(). | |
f3cf8bb0 JL |
335 | */ |
336 | struct msi_domain_ops { | |
aeeb5965 JL |
337 | irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, |
338 | msi_alloc_info_t *arg); | |
f3cf8bb0 JL |
339 | int (*msi_init)(struct irq_domain *domain, |
340 | struct msi_domain_info *info, | |
341 | unsigned int virq, irq_hw_number_t hwirq, | |
aeeb5965 | 342 | msi_alloc_info_t *arg); |
f3cf8bb0 JL |
343 | void (*msi_free)(struct irq_domain *domain, |
344 | struct msi_domain_info *info, | |
345 | unsigned int virq); | |
d9109698 JL |
346 | int (*msi_check)(struct irq_domain *domain, |
347 | struct msi_domain_info *info, | |
348 | struct device *dev); | |
349 | int (*msi_prepare)(struct irq_domain *domain, | |
350 | struct device *dev, int nvec, | |
351 | msi_alloc_info_t *arg); | |
d9109698 JL |
352 | void (*set_desc)(msi_alloc_info_t *arg, |
353 | struct msi_desc *desc); | |
43e9e705 TG |
354 | int (*domain_alloc_irqs)(struct irq_domain *domain, |
355 | struct device *dev, int nvec); | |
356 | void (*domain_free_irqs)(struct irq_domain *domain, | |
357 | struct device *dev); | |
f3cf8bb0 JL |
358 | }; |
359 | ||
360 | /** | |
361 | * struct msi_domain_info - MSI interrupt domain data | |
aeeb5965 JL |
362 | * @flags: Flags to describe features and capabilities | |
363 | * @ops: The callback data structure | |
364 | * @chip: Optional: associated interrupt chip | |
365 | * @chip_data: Optional: associated interrupt chip data | |
366 | * @handler: Optional: associated interrupt flow handler | |
367 | * @handler_data: Optional: associated interrupt flow handler data | |
368 | * @handler_name: Optional: associated interrupt flow handler name | |
369 | * @data: Optional: domain specific data | |
f3cf8bb0 JL |
370 | */ |
371 | struct msi_domain_info { | |
aeeb5965 | 372 | u32 flags; |
f3cf8bb0 JL |
373 | struct msi_domain_ops *ops; |
374 | struct irq_chip *chip; | |
aeeb5965 JL |
375 | void *chip_data; |
376 | irq_flow_handler_t handler; | |
377 | void *handler_data; | |
378 | const char *handler_name; | |
f3cf8bb0 JL |
379 | void *data; |
380 | }; | |
381 | ||
aeeb5965 JL |
382 | /* Flags for msi_domain_info */ |
383 | enum { | |
384 | /* | |
385 | * Init non implemented ops callbacks with default MSI domain | |
386 | * callbacks. | |
387 | */ | |
388 | MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), | |
389 | /* | |
390 | * Init non implemented chip callbacks with default MSI chip | |
391 | * callbacks. | |
392 | */ | |
393 | MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), | |
aeeb5965 | 394 | /* Support multiple PCI MSI interrupts */ |
b6140914 | 395 | MSI_FLAG_MULTI_PCI_MSI = (1 << 2), |
aeeb5965 | 396 | /* Support PCI MSIX interrupts */ |
b6140914 | 397 | MSI_FLAG_PCI_MSIX = (1 << 3), |
f3b0946d MZ |
398 | /* Needs early activate, required for PCI */ |
399 | MSI_FLAG_ACTIVATE_EARLY = (1 << 4), | |
22d0b12f TG |
400 | /* |
401 | * Must reactivate when irq is started even when | |
402 | * MSI_FLAG_ACTIVATE_EARLY has been set. | |
403 | */ | |
404 | MSI_FLAG_MUST_REACTIVATE = (1 << 5), | |
0be8153c MZ |
405 | /* Is level-triggered capable, using two messages */ |
406 | MSI_FLAG_LEVEL_CAPABLE = (1 << 6), | |
013bd8e5 TG |
407 | /* Populate sysfs on alloc() and destroy it on free() */ |
408 | MSI_FLAG_DEV_SYSFS = (1 << 7), | |
7a823443 TG |
409 | /* MSI-X entries must be contiguous */ |
410 | MSI_FLAG_MSIX_CONTIGUOUS = (1 << 8), | |
aeeb5965 JL |
411 | }; |
412 | ||
f3cf8bb0 JL |
413 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, |
414 | bool force); | |
415 | ||
be5436c8 | 416 | struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, |
f3cf8bb0 JL |
417 | struct msi_domain_info *info, |
418 | struct irq_domain *parent); | |
43e9e705 TG |
419 | int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
420 | int nvec); | |
0f62d941 TG |
421 | int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev, |
422 | int nvec); | |
d9109698 JL |
423 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
424 | int nvec); | |
43e9e705 | 425 | void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
0f62d941 | 426 | void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev); |
d9109698 | 427 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
f3cf8bb0 JL |
428 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); |
429 | ||
be5436c8 | 430 | struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, |
c09fcc4b MZ |
431 | struct msi_domain_info *info, |
432 | struct irq_domain *parent); | |
433 | int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, | |
434 | irq_write_msi_msg_t write_msi_msg); | |
435 | void platform_msi_domain_free_irqs(struct device *dev); | |
b2eba39b MZ |
436 | |
437 | /* When an MSI domain is used as an intermediate domain */ | |
438 | int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, | |
439 | int nvec, msi_alloc_info_t *args); | |
2145ac93 MZ |
440 | int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, |
441 | int virq, int nvec, msi_alloc_info_t *args); | |
552c494a | 442 | struct irq_domain * |
1f83515b MZ |
443 | __platform_msi_create_device_domain(struct device *dev, |
444 | unsigned int nvec, | |
445 | bool is_tree, | |
446 | irq_write_msi_msg_t write_msi_msg, | |
447 | const struct irq_domain_ops *ops, | |
448 | void *host_data); | |
449 | ||
450 | #define platform_msi_create_device_domain(dev, nvec, write, ops, data) \ | |
451 | __platform_msi_create_device_domain(dev, nvec, false, write, ops, data) | |
452 | #define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \ | |
453 | __platform_msi_create_device_domain(dev, nvec, true, write, ops, data) | |
454 | ||
9835cec6 TG |
455 | int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq, |
456 | unsigned int nr_irqs); | |
457 | void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq, | |
458 | unsigned int nvec); | |
552c494a | 459 | void *platform_msi_get_host_data(struct irq_domain *domain); |
f3cf8bb0 JL |
460 | #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ |
461 | ||
3878eaef | 462 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN |
be5436c8 | 463 | struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, |
3878eaef JL |
464 | struct msi_domain_info *info, |
465 | struct irq_domain *parent); | |
b6eec9b7 | 466 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); |
54fa97ee | 467 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); |
2fd60266 | 468 | bool pci_dev_has_special_msi_domain(struct pci_dev *pdev); |
54fa97ee MZ |
469 | #else |
470 | static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) | |
471 | { | |
472 | return NULL; | |
473 | } | |
3878eaef JL |
474 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ |
475 | ||
3b7d1921 | 476 | #endif /* LINUX_MSI_H */ |