/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"

/*
 * INTx
 */
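/*
 * Signal the user's INTx trigger eventfd, unless INTx is not the current
 * interrupt mode or the interrupt has been virtually disabled.
 */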
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

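/*
 * INTx handler.  Without DisINTx support (non-PCI-2.3) the line is requested
 * exclusively, so simply disable the IRQ and claim it.  With DisINTx the line
 * may be shared; only claim it when pci_check_and_mask_intx() confirms our
 * device was asserting the interrupt, masking it at the device.
 */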
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&	/* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

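/*
 * Switch the device into INTx mode: allocate the single interrupt context
 * and sync the hardware DisINTx state with the virtual mask.  The eventfd
 * and IRQ handler are wired up separately by vfio_intx_set_signal().
 */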
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
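/*
 * MSI/MSI-X interrupt handler: the dev_id passed to request_irq() is the
 * eventfd context itself, so signaling the user requires no device lookup.
 */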
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

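/*
 * Enable MSI or MSI-X with nvec vectors.  If the device cannot provide the
 * full count, any partial allocation is released and the number of vectors
 * actually available is returned so the caller can retry with fewer.
 */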
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	/* return the number of supported vectors if we can't get all: */
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		kfree(vdev->ctx);
		return ret;
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

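/*
 * (Re)wire the eventfd trigger for a single MSI/MSI-X vector.  Any existing
 * handler for the vector is torn down first; fd < 0 leaves it disabled.
 * The trigger is also registered as an IRQ bypass producer, which is
 * best-effort: failure is logged but does not fail the setup.
 */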
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets.  We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful.  To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret))
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

	vdev->ctx[vector].trigger = trigger;

	return 0;
}

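/*
 * Apply eventfd signals to the contiguous vector block [start, start + count).
 * A NULL fds array disables every vector in the block.  On failure, vectors
 * configured so far are unwound back to the disabled state.
 */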
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	pci_free_irq_vectors(pdev);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
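/*
 * The handlers below back the VFIO_DEVICE_SET_IRQS ioctl.  Each interprets
 * the data payload according to the type encoded in flags: DATA_NONE (no
 * payload), DATA_BOOL (one u8 per element), or DATA_EVENTFD (one s32 file
 * descriptor per element).
 */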
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

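/*
 * Trigger action for INTx.  An EVENTFD payload enables INTx (if needed) and
 * installs the signaling eventfd; count == 0 with DATA_NONE tears INTx down;
 * DATA_NONE/DATA_BOOL on an already enabled interrupt signal the user's
 * eventfd as if the device had asserted the interrupt.
 */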
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

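/*
 * Trigger action for MSI/MSI-X.  An EVENTFD payload enables the interrupt
 * mode (if needed) and wires eventfds to the requested vector range;
 * count == 0 with DATA_NONE disables it; DATA_NONE/DATA_BOOL signal the
 * already configured triggers directly.
 */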
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

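/*
 * Common helper for the single-eventfd ERR and REQ indexes.  DATA_NONE and
 * DATA_BOOL signal an existing eventfd (loopback testing), while DATA_EVENTFD
 * installs, replaces, or (with fd == -1) clears the trigger.
 */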
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

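/*
 * Entry point backing the VFIO_DEVICE_SET_IRQS ioctl: select a handler from
 * the IRQ index and the action encoded in flags, then call it with the
 * user-supplied range and data.
 *
 * Illustrative userspace sketch (an assumption about typical usage, not part
 * of this driver) wiring an eventfd to MSI vector 0, where efd is an eventfd
 * and device_fd is the VFIO device file descriptor:
 *
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
 *	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
 *
 *	irq_set->argsz = sizeof(buf);
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	memcpy(irq_set->data, &efd, sizeof(int32_t));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 */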
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}