Commit | Line | Data |
---|---|---|
f3cf8bb0 JL |
1 | /* |
2 | * linux/kernel/irq/msi.c | |
3 | * | |
4 | * Copyright (C) 2014 Intel Corp. | |
5 | * Author: Jiang Liu <jiang.liu@linux.intel.com> | |
6 | * | |
7 | * This file is licensed under GPLv2. | |
8 | * | |
9 | * This file contains common code to support Message Signaled Interrupts for | |
10 | * PCI compatible and non-PCI compatible devices. | |
11 | */ | |
aeeb5965 JL |
12 | #include <linux/types.h> |
13 | #include <linux/device.h> | |
f3cf8bb0 JL |
14 | #include <linux/irq.h> |
15 | #include <linux/irqdomain.h> | |
16 | #include <linux/msi.h> | |
17 | ||
d9109698 JL |
18 | /* Temporary solution for building, will be removed later */ |
19 | #include <linux/pci.h> | |
20 | ||
aa48b6f7 JL |
21 | struct msi_desc *alloc_msi_entry(struct device *dev) |
22 | { | |
23 | struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); | |
24 | if (!desc) | |
25 | return NULL; | |
26 | ||
27 | INIT_LIST_HEAD(&desc->list); | |
28 | desc->dev = dev; | |
29 | ||
30 | return desc; | |
31 | } | |
32 | ||
33 | void free_msi_entry(struct msi_desc *entry) | |
34 | { | |
35 | kfree(entry); | |
36 | } | |
37 | ||
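A minimal usage sketch, not part of this file (the helper names and the external list are invented): a bus layer could build up a device's descriptor list with alloc_msi_entry() and tear it down with free_msi_entry(). Locking and error handling are elided.

```c
/* Hypothetical helper: allocate one descriptor and queue it on a list. */
static int example_add_msi_descriptor(struct device *dev,
				      struct list_head *msi_list)
{
	struct msi_desc *desc = alloc_msi_entry(dev);

	if (!desc)
		return -ENOMEM;

	desc->nvec_used = 1;			/* one vector per descriptor */
	list_add_tail(&desc->list, msi_list);	/* list head set up by alloc_msi_entry() */
	return 0;
}

/* Hypothetical teardown: unlink and free every queued descriptor. */
static void example_remove_msi_descriptors(struct list_head *msi_list)
{
	struct msi_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, msi_list, list) {
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}
```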
38b6a1cf JL |
38 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
39 | { | |
40 | *msg = entry->msg; | |
41 | } | |
42 | ||
43 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | |
44 | { | |
45 | struct msi_desc *entry = irq_get_msi_desc(irq); | |
46 | ||
47 | __get_cached_msi_msg(entry, msg); | |
48 | } | |
49 | EXPORT_SYMBOL_GPL(get_cached_msi_msg); | |
50 | ||
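A hedged reader-side example (the function name is invented): a driver that knows its Linux irq number can retrieve the address/data pair that was last written for that interrupt.

```c
/* Sketch: dump the cached MSI message of @irq for debugging. */
static void example_dump_msi_msg(struct device *dev, unsigned int irq)
{
	struct msi_msg msg;

	get_cached_msi_msg(irq, &msg);
	dev_info(dev, "irq %u: MSI address %#x:%#08x, data %#x\n",
		 irq, msg.address_hi, msg.address_lo, msg.data);
}
```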
f3cf8bb0 | 51 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN |
74faaf7a TG |
52 | static inline void irq_chip_write_msi_msg(struct irq_data *data, |
53 | struct msi_msg *msg) | |
54 | { | |
55 | data->chip->irq_write_msi_msg(data, msg); | |
56 | } | |
57 | ||
f3cf8bb0 JL |
58 | /** |
59 | * msi_domain_set_affinity - Generic affinity setter function for MSI domains | |
61 | * @irq_data: The irq data associated with the interrupt | |
61 | * @mask: The affinity mask to set | |
62 | * @force: Flag to enforce setting (disable online checks) | |
63 | * | |
64 | * Intended to be used by MSI interrupt controllers which are | |
65 | * implemented with hierarchical domains. | |
66 | */ | |
67 | int msi_domain_set_affinity(struct irq_data *irq_data, | |
68 | const struct cpumask *mask, bool force) | |
69 | { | |
70 | struct irq_data *parent = irq_data->parent_data; | |
71 | struct msi_msg msg; | |
72 | int ret; | |
73 | ||
74 | ret = parent->chip->irq_set_affinity(parent, mask, force); | |
75 | if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) { | |
76 | BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); | |
77 | irq_chip_write_msi_msg(irq_data, &msg); | |
78 | } | |
79 | ||
80 | return ret; | |
81 | } | |
82 | ||
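For context, a sketch of the kind of irq_chip such a hierarchical MSI domain is paired with (all names are invented). irq_mask/irq_unmask must be provided; irq_set_affinity can point at msi_domain_set_affinity directly, or be left NULL and filled in by msi_domain_update_chip_ops() when MSI_FLAG_USE_DEF_CHIP_OPS is set.

```c
/* Hypothetical device callback: program the message into the hardware. */
static void example_irq_write_msi_msg(struct irq_data *data,
				      struct msi_msg *msg)
{
	/* Write msg->address_lo/address_hi and msg->data to the device here. */
}

static struct irq_chip example_msi_irq_chip = {
	.name			= "EXAMPLE-MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_write_msi_msg	= example_irq_write_msi_msg,
	.irq_set_affinity	= msi_domain_set_affinity,
};
```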
83 | static void msi_domain_activate(struct irq_domain *domain, | |
84 | struct irq_data *irq_data) | |
85 | { | |
86 | struct msi_msg msg; | |
87 | ||
88 | BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); | |
89 | irq_chip_write_msi_msg(irq_data, &msg); | |
90 | } | |
91 | ||
92 | static void msi_domain_deactivate(struct irq_domain *domain, | |
93 | struct irq_data *irq_data) | |
94 | { | |
95 | struct msi_msg msg; | |
96 | ||
97 | memset(&msg, 0, sizeof(msg)); | |
98 | irq_chip_write_msi_msg(irq_data, &msg); | |
99 | } | |
100 | ||
101 | static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
102 | unsigned int nr_irqs, void *arg) | |
103 | { | |
104 | struct msi_domain_info *info = domain->host_data; | |
105 | struct msi_domain_ops *ops = info->ops; | |
106 | irq_hw_number_t hwirq = ops->get_hwirq(info, arg); | |
107 | int i, ret; | |
108 | ||
109 | if (irq_find_mapping(domain, hwirq) > 0) | |
110 | return -EEXIST; | |
111 | ||
bf6f869f LJ |
112 | if (domain->parent) { |
113 | ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); | |
114 | if (ret < 0) | |
115 | return ret; | |
116 | } | |
f3cf8bb0 JL |
117 | |
118 | for (i = 0; i < nr_irqs; i++) { | |
119 | ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); | |
120 | if (ret < 0) { | |
121 | if (ops->msi_free) { | |
122 | for (i--; i >= 0; i--) | |
123 | ops->msi_free(domain, info, virq + i); | |
124 | } | |
125 | irq_domain_free_irqs_top(domain, virq, nr_irqs); | |
126 | return ret; | |
127 | } | |
128 | } | |
129 | ||
130 | return 0; | |
131 | } | |
132 | ||
133 | static void msi_domain_free(struct irq_domain *domain, unsigned int virq, | |
134 | unsigned int nr_irqs) | |
135 | { | |
136 | struct msi_domain_info *info = domain->host_data; | |
137 | int i; | |
138 | ||
139 | if (info->ops->msi_free) { | |
140 | for (i = 0; i < nr_irqs; i++) | |
141 | info->ops->msi_free(domain, info, virq + i); | |
142 | } | |
143 | irq_domain_free_irqs_top(domain, virq, nr_irqs); | |
144 | } | |
145 | ||
01364028 | 146 | static const struct irq_domain_ops msi_domain_ops = { |
f3cf8bb0 JL |
147 | .alloc = msi_domain_alloc, |
148 | .free = msi_domain_free, | |
149 | .activate = msi_domain_activate, | |
150 | .deactivate = msi_domain_deactivate, | |
151 | }; | |
152 | ||
aeeb5965 JL |
153 | #ifdef GENERIC_MSI_DOMAIN_OPS |
154 | static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info, | |
155 | msi_alloc_info_t *arg) | |
156 | { | |
157 | return arg->hwirq; | |
158 | } | |
159 | ||
160 | static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, | |
161 | int nvec, msi_alloc_info_t *arg) | |
162 | { | |
163 | memset(arg, 0, sizeof(*arg)); | |
164 | return 0; | |
165 | } | |
166 | ||
167 | static void msi_domain_ops_set_desc(msi_alloc_info_t *arg, | |
168 | struct msi_desc *desc) | |
169 | { | |
170 | arg->desc = desc; | |
171 | } | |
172 | #else | |
173 | #define msi_domain_ops_get_hwirq NULL | |
174 | #define msi_domain_ops_prepare NULL | |
175 | #define msi_domain_ops_set_desc NULL | |
176 | #endif /* !GENERIC_MSI_DOMAIN_OPS */ | |
177 | ||
178 | static int msi_domain_ops_init(struct irq_domain *domain, | |
179 | struct msi_domain_info *info, | |
180 | unsigned int virq, irq_hw_number_t hwirq, | |
181 | msi_alloc_info_t *arg) | |
182 | { | |
183 | irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, | |
184 | info->chip_data); | |
185 | if (info->handler && info->handler_name) { | |
186 | __irq_set_handler(virq, info->handler, 0, info->handler_name); | |
187 | if (info->handler_data) | |
188 | irq_set_handler_data(virq, info->handler_data); | |
189 | } | |
190 | return 0; | |
191 | } | |
192 | ||
193 | static int msi_domain_ops_check(struct irq_domain *domain, | |
194 | struct msi_domain_info *info, | |
195 | struct device *dev) | |
196 | { | |
197 | return 0; | |
198 | } | |
199 | ||
200 | static struct msi_domain_ops msi_domain_ops_default = { | |
201 | .get_hwirq = msi_domain_ops_get_hwirq, | |
202 | .msi_init = msi_domain_ops_init, | |
203 | .msi_check = msi_domain_ops_check, | |
204 | .msi_prepare = msi_domain_ops_prepare, | |
205 | .set_desc = msi_domain_ops_set_desc, | |
206 | }; | |
207 | ||
208 | static void msi_domain_update_dom_ops(struct msi_domain_info *info) | |
209 | { | |
210 | struct msi_domain_ops *ops = info->ops; | |
211 | ||
212 | if (ops == NULL) { | |
213 | info->ops = &msi_domain_ops_default; | |
214 | return; | |
215 | } | |
216 | ||
217 | if (ops->get_hwirq == NULL) | |
218 | ops->get_hwirq = msi_domain_ops_default.get_hwirq; | |
219 | if (ops->msi_init == NULL) | |
220 | ops->msi_init = msi_domain_ops_default.msi_init; | |
221 | if (ops->msi_check == NULL) | |
222 | ops->msi_check = msi_domain_ops_default.msi_check; | |
223 | if (ops->msi_prepare == NULL) | |
224 | ops->msi_prepare = msi_domain_ops_default.msi_prepare; | |
225 | if (ops->set_desc == NULL) | |
226 | ops->set_desc = msi_domain_ops_default.set_desc; | |
227 | } | |
228 | ||
229 | static void msi_domain_update_chip_ops(struct msi_domain_info *info) | |
230 | { | |
231 | struct irq_chip *chip = info->chip; | |
232 | ||
0701c53e | 233 | BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); |
aeeb5965 JL |
234 | if (!chip->irq_set_affinity) |
235 | chip->irq_set_affinity = msi_domain_set_affinity; | |
236 | } | |
237 | ||
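To illustrate the fallback above with a sketch (not taken from any real driver): an implementation that sets MSI_FLAG_USE_DEF_DOM_OPS only supplies the callbacks it wants to override, and every member left NULL is filled in from msi_domain_ops_default.

```c
/* Hypothetical: override only msi_prepare, inherit the other callbacks. */
static int example_msi_prepare(struct irq_domain *domain, struct device *dev,
			       int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	/* Stash bus specific allocation data in *arg here if needed. */
	return 0;
}

static struct msi_domain_ops example_msi_ops = {
	.msi_prepare	= example_msi_prepare,
	/* get_hwirq, msi_init, msi_check and set_desc come from the defaults */
};
```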
f3cf8bb0 JL |
238 | /** |
239 | * msi_create_irq_domain - Create an MSI interrupt domain | |
be5436c8 | 240 | * @fwnode: Optional fwnode of the interrupt controller |
f3cf8bb0 JL |
241 | * @info: MSI domain info |
242 | * @parent: Parent irq domain | |
243 | */ | |
be5436c8 | 244 | struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, |
f3cf8bb0 JL |
245 | struct msi_domain_info *info, |
246 | struct irq_domain *parent) | |
247 | { | |
aeeb5965 JL |
248 | if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) |
249 | msi_domain_update_dom_ops(info); | |
250 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) | |
251 | msi_domain_update_chip_ops(info); | |
f3cf8bb0 | 252 | |
be5436c8 MZ |
253 | return irq_domain_create_hierarchy(parent, 0, 0, fwnode, |
254 | &msi_domain_ops, info); | |
f3cf8bb0 JL |
255 | } |
256 | ||
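A hedged end-to-end sketch of domain creation, reusing the invented chip and ops from the earlier examples: the caller fills in msi_domain_info, requests the default callbacks via the two flags, and stacks the MSI domain on top of the parent interrupt controller domain.

```c
static struct msi_domain_info example_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.ops	= &example_msi_ops,
	.chip	= &example_msi_irq_chip,
};

/* Hypothetical init path: create the MSI domain on top of @parent. */
static struct irq_domain *example_create_msi_domain(struct fwnode_handle *fwnode,
						    struct irq_domain *parent)
{
	return msi_create_irq_domain(fwnode, &example_msi_domain_info, parent);
}
```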
b2eba39b MZ |
257 | int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, |
258 | int nvec, msi_alloc_info_t *arg) | |
259 | { | |
260 | struct msi_domain_info *info = domain->host_data; | |
261 | struct msi_domain_ops *ops = info->ops; | |
262 | int ret; | |
263 | ||
264 | ret = ops->msi_check(domain, info, dev); | |
265 | if (ret == 0) | |
266 | ret = ops->msi_prepare(domain, dev, nvec, arg); | |
267 | ||
268 | return ret; | |
269 | } | |
270 | ||
2145ac93 MZ |
271 | int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, |
272 | int virq, int nvec, msi_alloc_info_t *arg) | |
273 | { | |
274 | struct msi_domain_info *info = domain->host_data; | |
275 | struct msi_domain_ops *ops = info->ops; | |
276 | struct msi_desc *desc; | |
277 | int ret = 0; | |
278 | ||
279 | for_each_msi_entry(desc, dev) { | |
280 | /* Don't even try the multi-MSI brain damage. */ | |
281 | if (WARN_ON(!desc->irq || desc->nvec_used != 1)) { | |
282 | ret = -EINVAL; | |
283 | break; | |
284 | } | |
285 | ||
286 | if (!(desc->irq >= virq && desc->irq < (virq + nvec))) | |
287 | continue; | |
288 | ||
289 | ops->set_desc(arg, desc); | |
290 | /* Assumes the domain mutex is held! */ | |
291 | ret = irq_domain_alloc_irqs_recursive(domain, virq, 1, arg); | |
292 | if (ret) | |
293 | break; | |
294 | ||
295 | irq_set_msi_desc_off(virq, 0, desc); | |
296 | } | |
297 | ||
298 | if (ret) { | |
299 | /* Mop up the damage */ | |
300 | for_each_msi_entry(desc, dev) { | |
301 | if (!(desc->irq >= virq && desc->irq < (virq + nvec))) | |
302 | continue; | |
303 | ||
304 | irq_domain_free_irqs_common(domain, desc->irq, 1); | |
305 | } | |
306 | } | |
307 | ||
308 | return ret; | |
309 | } | |
310 | ||
d9109698 JL |
311 | /** |
312 | * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain | |
313 | * @domain: The domain to allocate from | |
314 | * @dev: Pointer to device struct of the device for which the interrupts | |
315 | * are allocated | |
316 | * @nvec: The number of interrupts to allocate | |
317 | * | |
318 | * Returns 0 on success or an error code. | |
319 | */ | |
320 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, | |
321 | int nvec) | |
322 | { | |
323 | struct msi_domain_info *info = domain->host_data; | |
324 | struct msi_domain_ops *ops = info->ops; | |
325 | msi_alloc_info_t arg; | |
326 | struct msi_desc *desc; | |
327 | int i, ret, virq = -1; | |
328 | ||
b2eba39b | 329 | ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg); |
d9109698 JL |
330 | if (ret) |
331 | return ret; | |
332 | ||
333 | for_each_msi_entry(desc, dev) { | |
334 | ops->set_desc(&arg, desc); | |
aeeb5965 JL |
335 | if (info->flags & MSI_FLAG_IDENTITY_MAP) |
336 | virq = (int)ops->get_hwirq(info, &arg); | |
337 | else | |
338 | virq = -1; | |
d9109698 | 339 | |
aeeb5965 | 340 | virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, |
d9109698 JL |
341 | dev_to_node(dev), &arg, false); |
342 | if (virq < 0) { | |
343 | ret = -ENOSPC; | |
344 | if (ops->handle_error) | |
345 | ret = ops->handle_error(domain, desc, ret); | |
346 | if (ops->msi_finish) | |
347 | ops->msi_finish(&arg, ret); | |
348 | return ret; | |
349 | } | |
350 | ||
351 | for (i = 0; i < desc->nvec_used; i++) | |
352 | irq_set_msi_desc_off(virq, i, desc); | |
353 | } | |
354 | ||
355 | if (ops->msi_finish) | |
356 | ops->msi_finish(&arg, 0); | |
357 | ||
358 | for_each_msi_entry(desc, dev) { | |
359 | if (desc->nvec_used == 1) | |
360 | dev_dbg(dev, "irq %d for MSI\n", desc->irq); | |
361 | else | |
362 | dev_dbg(dev, "irq [%d-%d] for MSI\n", | |
363 | desc->irq, desc->irq + desc->nvec_used - 1); | |
364 | } | |
365 | ||
366 | return 0; | |
367 | } | |
368 | ||
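A sketch of the caller's side (hypothetical wrappers): a bus layer that has already queued msi_desc entries on @dev (for_each_msi_entry() walks that per-device list) allocates the Linux interrupts in one call and later releases them with msi_domain_free_irqs() below.

```c
/* Hypothetical: allocate interrupts for every descriptor queued on @dev. */
static int example_enable_msi(struct irq_domain *msi_domain,
			      struct device *dev, int nvec)
{
	int ret;

	ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
	if (ret)
		dev_err(dev, "failed to allocate %d MSI irqs: %d\n", nvec, ret);
	return ret;
}

/* Hypothetical: release the interrupts again on teardown. */
static void example_disable_msi(struct irq_domain *msi_domain,
				struct device *dev)
{
	msi_domain_free_irqs(msi_domain, dev);
}
```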
369 | /** | |
370 | * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated to @dev | |
371 | * @domain: The domain managing the interrupts | |
372 | * @dev: Pointer to device struct of the device for which the interrupts | |
373 | * are freed | |
374 | */ | |
375 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) | |
376 | { | |
377 | struct msi_desc *desc; | |
378 | ||
379 | for_each_msi_entry(desc, dev) { | |
fe0c52fc MZ |
380 | /* |
381 | * We might have failed to allocate an MSI early | |
382 | * enough that there is no IRQ associated to this | |
383 | * entry. If that's the case, don't do anything. | |
384 | */ | |
385 | if (desc->irq) { | |
386 | irq_domain_free_irqs(desc->irq, desc->nvec_used); | |
387 | desc->irq = 0; | |
388 | } | |
d9109698 JL |
389 | } |
390 | } | |
391 | ||
f3cf8bb0 JL |
392 | /** |
393 | * msi_get_domain_info - Get the MSI interrupt domain info for @domain | |
394 | * @domain: The interrupt domain to retrieve data from | |
395 | * | |
396 | * Returns the pointer to the msi_domain_info stored in | |
397 | * @domain->host_data. | |
398 | */ | |
399 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) | |
400 | { | |
401 | return (struct msi_domain_info *)domain->host_data; | |
402 | } | |
403 | ||
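A final small sketch (the helper is invented): code that only holds the irq_domain pointer can query the MSI specific data that was registered with msi_create_irq_domain().

```c
/* Hypothetical query helper built on msi_get_domain_info(). */
static bool example_domain_has_identity_map(struct irq_domain *domain)
{
	struct msi_domain_info *info = msi_get_domain_info(domain);

	return info && (info->flags & MSI_FLAG_IDENTITY_MAP);
}
```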
404 | #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ |