/* drivers/s390/cio/vfio_ccw_drv.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

/* Workqueue on which deferred I/O completion handling (io_todo) runs. */
struct workqueue_struct *vfio_ccw_work_q;
/* Slab cache for the ccw_io_region that is copied to/from userspace. */
struct kmem_cache *vfio_ccw_io_region;

/*
 * Helpers
 */
84cd8fc4 30int vfio_ccw_sch_quiesce(struct subchannel *sch)
63f1934d
DJS
31{
32 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
33 DECLARE_COMPLETION_ONSTACK(completion);
34 int iretry, ret = 0;
35
36 spin_lock_irq(sch->lock);
37 if (!sch->schib.pmcw.ena)
38 goto out_unlock;
39 ret = cio_disable_subchannel(sch);
40 if (ret != -EBUSY)
41 goto out_unlock;
42
43 do {
44 iretry = 255;
45
46 ret = cio_cancel_halt_clear(sch, &iretry);
47 while (ret == -EBUSY) {
48 /*
49 * Flush all I/O and wait for
50 * cancel/halt/clear completion.
51 */
52 private->completion = &completion;
53 spin_unlock_irq(sch->lock);
54
55 wait_for_completion_timeout(&completion, 3*HZ);
56
57 spin_lock_irq(sch->lock);
58 private->completion = NULL;
e5f84dba 59 flush_workqueue(vfio_ccw_work_q);
63f1934d
DJS
60 ret = cio_cancel_halt_clear(sch, &iretry);
61 };
62
63 ret = cio_disable_subchannel(sch);
64 } while (ret == -EBUSY);
63f1934d 65out_unlock:
bbe37e4c 66 private->state = VFIO_CCW_STATE_NOT_OPER;
63f1934d
DJS
67 spin_unlock_irq(sch->lock);
68 return ret;
69}
70
e5f84dba
DJS
71static void vfio_ccw_sch_io_todo(struct work_struct *work)
72{
73 struct vfio_ccw_private *private;
e5f84dba 74 struct irb *irb;
4e149e43 75
e5f84dba
DJS
76 private = container_of(work, struct vfio_ccw_private, io_work);
77 irb = &private->irb;
4e149e43 78
e5f84dba
DJS
79 if (scsw_is_solicited(&irb->scsw)) {
80 cp_update_scsw(&private->cp, &irb->scsw);
81 cp_free(&private->cp);
82 }
c98e16b2 83 memcpy(private->io_region->irb_area, irb, sizeof(*irb));
e5f84dba
DJS
84
85 if (private->io_trigger)
86 eventfd_signal(private->io_trigger, 1);
4e149e43 87
bbe37e4c
DJS
88 if (private->mdev)
89 private->state = VFIO_CCW_STATE_IDLE;
4e149e43
DJS
90}
91
63f1934d
DJS
92/*
93 * Css driver callbacks
94 */
95static void vfio_ccw_sch_irq(struct subchannel *sch)
96{
97 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
98
99 inc_irq_stat(IRQIO_CIO);
bbe37e4c 100 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
63f1934d
DJS
101}
102
/*
 * vfio_ccw_sch_probe - css driver probe callback
 *
 * Allocates the per-subchannel private data and the userspace-visible
 * I/O region, enables the subchannel on VFIO_CCW_ISC, and registers
 * mediated-device support.  On any failure, everything allocated or
 * enabled so far is rolled back in reverse order.
 */
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret;

	/* QDIO subchannels are not supported by this driver. */
	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	/* Allocated from a usercopy cache: region is copied to/from user. */
	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region) {
		kfree(private);
		return -ENOMEM;
	}

	private->sch = sch;
	dev_set_drvdata(&sch->dev, private);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	/* Use the subchannel address itself as the interruption parameter. */
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);
	return ret;
}
155
/*
 * vfio_ccw_sch_remove - css driver remove callback
 *
 * Quiesces the subchannel (terminating any running I/O), unregisters
 * the mediated device, and frees the per-subchannel data.  The order
 * matters: the mdev must be gone before its backing data is released.
 */
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);

	return 0;
}
171
/* On shutdown, just make sure all I/O is stopped and the subchannel off. */
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}
176
177/**
178 * vfio_ccw_sch_event - process subchannel event
179 * @sch: subchannel
180 * @process: non-zero if function is called in process context
181 *
182 * An unspecified event occurred for this subchannel. Adjust data according
183 * to the current operational state of the subchannel. Return zero when the
184 * event has been handled sufficiently or -EAGAIN when this function should
185 * be called again in process context.
186 */
187static int vfio_ccw_sch_event(struct subchannel *sch, int process)
188{
bbe37e4c 189 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
63f1934d 190 unsigned long flags;
2c861d89 191 int rc = -EAGAIN;
63f1934d
DJS
192
193 spin_lock_irqsave(sch->lock, flags);
194 if (!device_is_registered(&sch->dev))
195 goto out_unlock;
196
197 if (work_pending(&sch->todo_work))
198 goto out_unlock;
199
200 if (cio_update_schib(sch)) {
bbe37e4c 201 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
2c861d89 202 rc = 0;
63f1934d
DJS
203 goto out_unlock;
204 }
205
bbe37e4c
DJS
206 private = dev_get_drvdata(&sch->dev);
207 if (private->state == VFIO_CCW_STATE_NOT_OPER) {
208 private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
209 VFIO_CCW_STATE_STANDBY;
210 }
2c861d89 211 rc = 0;
bbe37e4c 212
63f1934d
DJS
213out_unlock:
214 spin_unlock_irqrestore(sch->lock, flags);
215
2c861d89 216 return rc;
63f1934d
DJS
217}
218
/* Match any I/O subchannel (match on type only). */
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
224
/* Css driver registration: wires the callbacks above into the cio core. */
static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
};
237
238static int __init vfio_ccw_sch_init(void)
239{
240 int ret;
241
e5f84dba
DJS
242 vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
243 if (!vfio_ccw_work_q)
244 return -ENOMEM;
245
bf42daed
EF
246 vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
247 sizeof(struct ccw_io_region), 0,
248 SLAB_ACCOUNT, 0,
249 sizeof(struct ccw_io_region), NULL);
250 if (!vfio_ccw_io_region) {
251 destroy_workqueue(vfio_ccw_work_q);
252 return -ENOMEM;
253 }
254
63f1934d
DJS
255 isc_register(VFIO_CCW_ISC);
256 ret = css_driver_register(&vfio_ccw_sch_driver);
e5f84dba 257 if (ret) {
63f1934d 258 isc_unregister(VFIO_CCW_ISC);
bf42daed 259 kmem_cache_destroy(vfio_ccw_io_region);
e5f84dba
DJS
260 destroy_workqueue(vfio_ccw_work_q);
261 }
63f1934d
DJS
262
263 return ret;
264}
265
/* Module exit: tear down in strict reverse order of vfio_ccw_sch_init(). */
static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");