1 // SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */
12 #include <linux/vfio.h>
17 #include "vfio_ccw_private.h"
19 static int fsm_io_helper(struct vfio_ccw_private *private)
21 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
28 spin_lock_irqsave(sch->lock, flags);
30 orb = cp_get_orb(&private->cp, sch);
36 VFIO_CCW_TRACE_EVENT(5, "stIO");
37 VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));
39 /* Issue "Start Subchannel" */
40 ccode = ssch(sch->schid, orb);
42 VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));
47 * Initialize device status information
49 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
51 private->state = VFIO_CCW_STATE_CP_PENDING;
53 case 1: /* Status pending */
57 case 3: /* Device/path not operational */
65 if (cio_update_schib(sch))
68 ret = sch->lpm ? -EACCES : -ENODEV;
75 spin_unlock_irqrestore(sch->lock, flags);
79 static int fsm_do_halt(struct vfio_ccw_private *private)
81 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
86 spin_lock_irqsave(sch->lock, flags);
88 VFIO_CCW_TRACE_EVENT(2, "haltIO");
89 VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
91 /* Issue "Halt Subchannel" */
92 ccode = hsch(sch->schid);
94 VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
99 * Initialize device status information
101 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
104 case 1: /* Status pending */
108 case 3: /* Device not operational */
114 spin_unlock_irqrestore(sch->lock, flags);
118 static int fsm_do_clear(struct vfio_ccw_private *private)
120 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
125 spin_lock_irqsave(sch->lock, flags);
127 VFIO_CCW_TRACE_EVENT(2, "clearIO");
128 VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
130 /* Issue "Clear Subchannel" */
131 ccode = csch(sch->schid);
133 VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
138 * Initialize device status information
140 sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
141 /* TODO: check what else we might need to clear */
144 case 3: /* Device not operational */
150 spin_unlock_irqrestore(sch->lock, flags);
154 static void fsm_notoper(struct vfio_ccw_private *private,
155 enum vfio_ccw_event event)
157 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
159 VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
168 * Probably we should send the machine check to the guest.
170 css_sched_sch_todo(sch, SCH_TODO_UNREG);
171 private->state = VFIO_CCW_STATE_NOT_OPER;
173 /* This is usually handled during CLOSE event */
174 cp_free(&private->cp);
178 * No operation action.
180 static void fsm_nop(struct vfio_ccw_private *private,
181 enum vfio_ccw_event event)
185 static void fsm_io_error(struct vfio_ccw_private *private,
186 enum vfio_ccw_event event)
188 pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
189 private->io_region->ret_code = -EIO;
192 static void fsm_io_busy(struct vfio_ccw_private *private,
193 enum vfio_ccw_event event)
195 private->io_region->ret_code = -EBUSY;
198 static void fsm_io_retry(struct vfio_ccw_private *private,
199 enum vfio_ccw_event event)
201 private->io_region->ret_code = -EAGAIN;
204 static void fsm_async_error(struct vfio_ccw_private *private,
205 enum vfio_ccw_event event)
207 struct ccw_cmd_region *cmd_region = private->cmd_region;
209 pr_err("vfio-ccw: FSM: %s request from state:%d\n",
210 cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
211 cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
212 "<unknown>", private->state);
213 cmd_region->ret_code = -EIO;
216 static void fsm_async_retry(struct vfio_ccw_private *private,
217 enum vfio_ccw_event event)
219 private->cmd_region->ret_code = -EAGAIN;
222 static void fsm_disabled_irq(struct vfio_ccw_private *private,
223 enum vfio_ccw_event event)
225 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
228 * An interrupt in a disabled state means a previous disable was not
229 * successful - should not happen, but we try to disable again.
231 cio_disable_subchannel(sch);
233 inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
235 struct subchannel *sch = to_subchannel(p->vdev.dev->parent);
241 * Deal with the ccw command request from the userspace.
243 static void fsm_io_request(struct vfio_ccw_private *private,
244 enum vfio_ccw_event event)
247 union scsw *scsw = &private->scsw;
248 struct ccw_io_region *io_region = private->io_region;
249 char *errstr = "request";
250 struct subchannel_id schid = get_schid(private);
252 private->state = VFIO_CCW_STATE_CP_PROCESSING;
253 memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
255 if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
256 orb = (union orb *)io_region->orb_area;
258 /* Don't try to build a cp if transport mode is specified. */
260 io_region->ret_code = -EOPNOTSUPP;
261 VFIO_CCW_MSG_EVENT(2,
262 "sch %x.%x.%04x: transport mode\n",
264 schid.ssid, schid.sch_no);
265 errstr = "transport mode";
268 io_region->ret_code = cp_init(&private->cp, orb);
269 if (io_region->ret_code) {
270 VFIO_CCW_MSG_EVENT(2,
271 "sch %x.%x.%04x: cp_init=%d\n",
273 schid.ssid, schid.sch_no,
274 io_region->ret_code);
279 io_region->ret_code = cp_prefetch(&private->cp);
280 if (io_region->ret_code) {
281 VFIO_CCW_MSG_EVENT(2,
282 "sch %x.%x.%04x: cp_prefetch=%d\n",
284 schid.ssid, schid.sch_no,
285 io_region->ret_code);
286 errstr = "cp prefetch";
287 cp_free(&private->cp);
291 /* Start channel program and wait for I/O interrupt. */
292 io_region->ret_code = fsm_io_helper(private);
293 if (io_region->ret_code) {
294 VFIO_CCW_MSG_EVENT(2,
295 "sch %x.%x.%04x: fsm_io_helper=%d\n",
297 schid.ssid, schid.sch_no,
298 io_region->ret_code);
299 errstr = "cp fsm_io_helper";
300 cp_free(&private->cp);
304 } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
305 VFIO_CCW_MSG_EVENT(2,
306 "sch %x.%x.%04x: halt on io_region\n",
308 schid.ssid, schid.sch_no);
309 /* halt is handled via the async cmd region */
310 io_region->ret_code = -EOPNOTSUPP;
312 } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
313 VFIO_CCW_MSG_EVENT(2,
314 "sch %x.%x.%04x: clear on io_region\n",
316 schid.ssid, schid.sch_no);
317 /* clear is handled via the async cmd region */
318 io_region->ret_code = -EOPNOTSUPP;
323 private->state = VFIO_CCW_STATE_IDLE;
324 trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
325 io_region->ret_code, errstr);
329 * Deal with an async request from userspace.
331 static void fsm_async_request(struct vfio_ccw_private *private,
332 enum vfio_ccw_event event)
334 struct ccw_cmd_region *cmd_region = private->cmd_region;
336 switch (cmd_region->command) {
337 case VFIO_CCW_ASYNC_CMD_HSCH:
338 cmd_region->ret_code = fsm_do_halt(private);
340 case VFIO_CCW_ASYNC_CMD_CSCH:
341 cmd_region->ret_code = fsm_do_clear(private);
344 /* should not happen? */
345 cmd_region->ret_code = -EINVAL;
348 trace_vfio_ccw_fsm_async_request(get_schid(private),
350 cmd_region->ret_code);
354 * Got an interrupt for a normal io (state busy).
356 static void fsm_irq(struct vfio_ccw_private *private,
357 enum vfio_ccw_event event)
359 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
360 struct irb *irb = this_cpu_ptr(&cio_irb);
362 VFIO_CCW_TRACE_EVENT(6, "IRQ");
363 VFIO_CCW_TRACE_EVENT(6, dev_name(&sch->dev));
365 memcpy(&private->irb, irb, sizeof(*irb));
367 queue_work(vfio_ccw_work_q, &private->io_work);
369 if (private->completion)
370 complete(private->completion);
373 static void fsm_open(struct vfio_ccw_private *private,
374 enum vfio_ccw_event event)
376 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
379 spin_lock_irq(sch->lock);
380 sch->isc = VFIO_CCW_ISC;
381 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
385 private->state = VFIO_CCW_STATE_IDLE;
386 spin_unlock_irq(sch->lock);
390 spin_unlock_irq(sch->lock);
391 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
394 static void fsm_close(struct vfio_ccw_private *private,
395 enum vfio_ccw_event event)
397 struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
400 spin_lock_irq(sch->lock);
402 if (!sch->schib.pmcw.ena)
405 ret = cio_disable_subchannel(sch);
407 ret = vfio_ccw_sch_quiesce(sch);
411 private->state = VFIO_CCW_STATE_STANDBY;
412 spin_unlock_irq(sch->lock);
413 cp_free(&private->cp);
417 spin_unlock_irq(sch->lock);
418 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
422 * Device statemachine
424 fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
425 [VFIO_CCW_STATE_NOT_OPER] = {
426 [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
427 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
428 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
429 [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
430 [VFIO_CCW_EVENT_OPEN] = fsm_nop,
431 [VFIO_CCW_EVENT_CLOSE] = fsm_nop,
433 [VFIO_CCW_STATE_STANDBY] = {
434 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
435 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
436 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
437 [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
438 [VFIO_CCW_EVENT_OPEN] = fsm_open,
439 [VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
441 [VFIO_CCW_STATE_IDLE] = {
442 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
443 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
444 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
445 [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
446 [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
447 [VFIO_CCW_EVENT_CLOSE] = fsm_close,
449 [VFIO_CCW_STATE_CP_PROCESSING] = {
450 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
451 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
452 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
453 [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
454 [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
455 [VFIO_CCW_EVENT_CLOSE] = fsm_close,
457 [VFIO_CCW_STATE_CP_PENDING] = {
458 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
459 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
460 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
461 [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
462 [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
463 [VFIO_CCW_EVENT_CLOSE] = fsm_close,