1 // SPDX-License-Identifier: GPL-1.0+
3 * bus driver for ccw devices
5 * Copyright IBM Corp. 2002, 2008
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
11 #define KMSG_COMPONENT "cio"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/spinlock.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/list.h>
21 #include <linux/device.h>
22 #include <linux/workqueue.h>
23 #include <linux/delay.h>
24 #include <linux/timer.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/sched/signal.h>
27 #include <linux/dma-mapping.h>
29 #include <asm/ccwdev.h>
31 #include <asm/param.h> /* HZ */
37 #include "cio_debug.h"
42 #include "blacklist.h"
45 static struct timer_list recovery_timer;
46 static DEFINE_SPINLOCK(recovery_lock);
47 static int recovery_phase;
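/* Delays (in seconds) between successive path-recovery attempts. */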
48 static const unsigned long recovery_delay[] = { 3, 30, 300 };
50 static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
51 static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
52 static struct bus_type ccw_bus_type;
54 /******************* bus type handling ***********************/
56 /* The Linux driver model distinguishes between a bus type and
57 * the bus itself. Of course we only have one channel
58 * subsystem driver and one channel system per machine, but
59 * we still use the abstraction. T.R. says it's a good idea. */
61 ccw_bus_match (struct device * dev, struct device_driver * drv)
63 struct ccw_device *cdev = to_ccwdev(dev);
64 struct ccw_driver *cdrv = to_ccwdrv(drv);
65 const struct ccw_device_id *ids = cdrv->ids, *found;
70 found = ccw_device_id_match(ids, &cdev->id);
74 cdev->id.driver_info = found->driver_info;
79 /* Store the modalias string, followed by the given suffix, into a buffer of
80 * the specified size. Return the length of the result (excluding the trailing
81 * '\0') even if it does not fit the buffer (snprintf semantics). */
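/* Example (hypothetical IDs): cu_type 0x3990, cu_model 0xe9, dev_type 0x3390,
 * dev_model 0x0c yields "ccw:t3990mE9dt3390dm0C". */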
82 static int snprint_alias(char *buf, size_t size,
83 struct ccw_device_id *id, const char *suffix)
87 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
93 if (id->dev_type != 0)
94 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
95 id->dev_model, suffix);
97 len += snprintf(buf, size, "dtdm%s", suffix);
102 /* Set up environment variables for ccw device uevent. Return 0 on success,
103 * non-zero otherwise. */
104 static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
106 struct ccw_device *cdev = to_ccwdev(dev);
107 struct ccw_device_id *id = &(cdev->id);
109 char modalias_buf[30];
112 ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
117 ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
121 /* The next two can be zero, that's ok for us */
123 ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
128 ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
133 snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
134 ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
138 static void io_subchannel_irq(struct subchannel *);
139 static int io_subchannel_probe(struct subchannel *);
140 static void io_subchannel_remove(struct subchannel *);
141 static void io_subchannel_shutdown(struct subchannel *);
142 static int io_subchannel_sch_event(struct subchannel *, int);
143 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
145 static void recovery_func(struct timer_list *unused);
147 static struct css_device_id io_subchannel_ids[] = {
148 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
149 { /* end of list */ },
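/* Wait until pending device recognition has finished and the cio workqueue
 * has been drained. */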
152 static int io_subchannel_settle(void)
156 ret = wait_event_interruptible(ccw_device_init_wq,
157 atomic_read(&ccw_device_init_count) == 0);
160 flush_workqueue(cio_work_q);
164 static struct css_driver io_subchannel_driver = {
166 .owner = THIS_MODULE,
167 .name = "io_subchannel",
169 .subchannel_type = io_subchannel_ids,
170 .irq = io_subchannel_irq,
171 .sch_event = io_subchannel_sch_event,
172 .chp_event = io_subchannel_chp_event,
173 .probe = io_subchannel_probe,
174 .remove = io_subchannel_remove,
175 .shutdown = io_subchannel_shutdown,
176 .settle = io_subchannel_settle,
179 int __init io_subchannel_init(void)
183 timer_setup(&recovery_timer, recovery_func, 0);
184 ret = bus_register(&ccw_bus_type);
187 ret = css_driver_register(&io_subchannel_driver);
189 bus_unregister(&ccw_bus_type);
195 /************************ device handling **************************/
198 devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
200 struct ccw_device *cdev = to_ccwdev(dev);
201 struct ccw_device_id *id = &(cdev->id);
203 if (id->dev_type != 0)
204 return sprintf(buf, "%04x/%02x\n",
205 id->dev_type, id->dev_model);
207 return sprintf(buf, "n/a\n");
211 cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
213 struct ccw_device *cdev = to_ccwdev(dev);
214 struct ccw_device_id *id = &(cdev->id);
216 return sprintf(buf, "%04x/%02x\n",
217 id->cu_type, id->cu_model);
221 modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
223 struct ccw_device *cdev = to_ccwdev(dev);
224 struct ccw_device_id *id = &(cdev->id);
227 len = snprint_alias(buf, PAGE_SIZE, id, "\n");
229 return len > PAGE_SIZE ? PAGE_SIZE : len;
233 online_show (struct device *dev, struct device_attribute *attr, char *buf)
235 struct ccw_device *cdev = to_ccwdev(dev);
237 return sprintf(buf, cdev->online ? "1\n" : "0\n");
240 int ccw_device_is_orphan(struct ccw_device *cdev)
242 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
245 static void ccw_device_unregister(struct ccw_device *cdev)
247 mutex_lock(&cdev->reg_mutex);
248 if (device_is_registered(&cdev->dev)) {
249 /* Undo device_add(). */
250 device_del(&cdev->dev);
252 mutex_unlock(&cdev->reg_mutex);
254 if (cdev->private->flags.initialized) {
255 cdev->private->flags.initialized = 0;
256 /* Release reference from device_initialize(). */
257 put_device(&cdev->dev);
261 static void io_subchannel_quiesce(struct subchannel *);
264 * ccw_device_set_offline() - disable a ccw device for I/O
265 * @cdev: target ccw device
267 * This function calls the driver's set_offline() function for @cdev, if
268 * given, and then disables @cdev.
270 * %0 on success and a negative error value on failure.
272 * enabled, ccw device lock not held
274 int ccw_device_set_offline(struct ccw_device *cdev)
276 struct subchannel *sch;
281 if (!cdev->online || !cdev->drv)
284 if (cdev->drv->set_offline) {
285 ret = cdev->drv->set_offline(cdev);
289 spin_lock_irq(cdev->ccwlock);
290 sch = to_subchannel(cdev->dev.parent);
292 /* Wait until a final state or DISCONNECTED is reached */
293 while (!dev_fsm_final_state(cdev) &&
294 cdev->private->state != DEV_STATE_DISCONNECTED) {
295 spin_unlock_irq(cdev->ccwlock);
296 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
297 cdev->private->state == DEV_STATE_DISCONNECTED));
298 spin_lock_irq(cdev->ccwlock);
301 ret = ccw_device_offline(cdev);
304 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
305 "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
306 cdev->private->dev_id.devno);
309 state = cdev->private->state;
310 spin_unlock_irq(cdev->ccwlock);
311 io_subchannel_quiesce(sch);
312 spin_lock_irq(cdev->ccwlock);
313 cdev->private->state = state;
314 } while (ret == -EBUSY);
315 spin_unlock_irq(cdev->ccwlock);
316 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
317 cdev->private->state == DEV_STATE_DISCONNECTED));
318 /* Inform the user if set offline failed. */
319 if (cdev->private->state == DEV_STATE_BOXED) {
320 pr_warn("%s: The device entered boxed state while being set offline\n",
321 dev_name(&cdev->dev));
322 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
323 pr_warn("%s: The device stopped operating while being set offline\n",
324 dev_name(&cdev->dev));
326 /* Give up reference from ccw_device_set_online(). */
327 put_device(&cdev->dev);
331 cdev->private->state = DEV_STATE_OFFLINE;
332 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
333 spin_unlock_irq(cdev->ccwlock);
334 /* Give up reference from ccw_device_set_online(). */
335 put_device(&cdev->dev);
340 * ccw_device_set_online() - enable a ccw device for I/O
341 * @cdev: target ccw device
343 * This function first enables @cdev and then calls the driver's set_online()
344 * function for @cdev, if given. If set_online() returns an error, @cdev is
347 * %0 on success and a negative error value on failure.
349 * enabled, ccw device lock not held
351 int ccw_device_set_online(struct ccw_device *cdev)
358 if (cdev->online || !cdev->drv)
360 /* Hold on to an extra reference while device is online. */
361 if (!get_device(&cdev->dev))
364 spin_lock_irq(cdev->ccwlock);
365 ret = ccw_device_online(cdev);
366 spin_unlock_irq(cdev->ccwlock);
368 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
370 CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
371 "device 0.%x.%04x\n",
372 ret, cdev->private->dev_id.ssid,
373 cdev->private->dev_id.devno);
374 /* Give up online reference since onlining failed. */
375 put_device(&cdev->dev);
378 spin_lock_irq(cdev->ccwlock);
379 /* Check if online processing was successful */
380 if ((cdev->private->state != DEV_STATE_ONLINE) &&
381 (cdev->private->state != DEV_STATE_W4SENSE)) {
382 spin_unlock_irq(cdev->ccwlock);
383 /* Inform the user that set online failed. */
384 if (cdev->private->state == DEV_STATE_BOXED) {
385 pr_warn("%s: Setting the device online failed because it is boxed\n",
386 dev_name(&cdev->dev));
387 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
388 pr_warn("%s: Setting the device online failed because it is not operational\n",
389 dev_name(&cdev->dev));
391 /* Give up online reference since onlining failed. */
392 put_device(&cdev->dev);
395 spin_unlock_irq(cdev->ccwlock);
396 if (cdev->drv->set_online)
397 ret = cdev->drv->set_online(cdev);
401 spin_lock_irq(cdev->ccwlock);
403 spin_unlock_irq(cdev->ccwlock);
407 spin_lock_irq(cdev->ccwlock);
408 /* Wait until a final state or DISCONNECTED is reached */
409 while (!dev_fsm_final_state(cdev) &&
410 cdev->private->state != DEV_STATE_DISCONNECTED) {
411 spin_unlock_irq(cdev->ccwlock);
412 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
413 cdev->private->state == DEV_STATE_DISCONNECTED));
414 spin_lock_irq(cdev->ccwlock);
416 ret2 = ccw_device_offline(cdev);
419 spin_unlock_irq(cdev->ccwlock);
420 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
421 cdev->private->state == DEV_STATE_DISCONNECTED));
422 /* Give up online reference since onlining failed. */
423 put_device(&cdev->dev);
427 CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
428 "device 0.%x.%04x\n",
429 ret2, cdev->private->dev_id.ssid,
430 cdev->private->dev_id.devno);
431 cdev->private->state = DEV_STATE_OFFLINE;
432 spin_unlock_irq(cdev->ccwlock);
433 /* Give up online reference since onlining failed. */
434 put_device(&cdev->dev);
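/*
 * Usage sketch (hypothetical caller): ccw_device_set_online() and
 * ccw_device_set_offline() are exported and may be called by a ccw driver;
 * they are also used by the sysfs "online" attribute handling below.
 *
 *	if (ccw_device_set_online(cdev))
 *		dev_warn(&cdev->dev, "setting the device online failed\n");
 */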
438 static int online_store_handle_offline(struct ccw_device *cdev)
440 if (cdev->private->state == DEV_STATE_DISCONNECTED) {
441 spin_lock_irq(cdev->ccwlock);
442 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
443 spin_unlock_irq(cdev->ccwlock);
446 if (cdev->drv && cdev->drv->set_offline)
447 return ccw_device_set_offline(cdev);
451 static int online_store_recog_and_online(struct ccw_device *cdev)
453 /* Do device recognition, if needed. */
454 if (cdev->private->state == DEV_STATE_BOXED) {
455 spin_lock_irq(cdev->ccwlock);
456 ccw_device_recognition(cdev);
457 spin_unlock_irq(cdev->ccwlock);
458 wait_event(cdev->private->wait_q,
459 cdev->private->flags.recog_done);
460 if (cdev->private->state != DEV_STATE_OFFLINE)
461 /* recognition failed */
464 if (cdev->drv && cdev->drv->set_online)
465 return ccw_device_set_online(cdev);
469 static int online_store_handle_online(struct ccw_device *cdev, int force)
473 ret = online_store_recog_and_online(cdev);
476 if (force && cdev->private->state == DEV_STATE_BOXED) {
477 ret = ccw_device_stlck(cdev);
480 if (cdev->id.cu_type == 0)
481 cdev->private->state = DEV_STATE_NOT_OPER;
482 ret = online_store_recog_and_online(cdev);
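/*
 * The sysfs "online" attribute accepts "0" (set the device offline),
 * "1" (set it online) and "force" (force a boxed device online).
 */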
489 static ssize_t online_store (struct device *dev, struct device_attribute *attr,
490 const char *buf, size_t count)
492 struct ccw_device *cdev = to_ccwdev(dev);
496 /* Prevent conflict between multiple on-/offline processing requests. */
497 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
499 /* Prevent conflict between internal I/Os and on-/offline processing. */
500 if (!dev_fsm_final_state(cdev) &&
501 cdev->private->state != DEV_STATE_DISCONNECTED) {
505 /* Prevent conflict between pending work and on-/offline processing.*/
506 if (work_pending(&cdev->private->todo_work)) {
510 if (!strncmp(buf, "force\n", count)) {
516 ret = kstrtoul(buf, 16, &i);
524 ret = online_store_handle_offline(cdev);
527 ret = online_store_handle_online(cdev, force);
535 atomic_set(&cdev->private->onoff, 0);
536 return (ret < 0) ? ret : count;
540 available_show (struct device *dev, struct device_attribute *attr, char *buf)
542 struct ccw_device *cdev = to_ccwdev(dev);
543 struct subchannel *sch;
545 if (ccw_device_is_orphan(cdev))
546 return sprintf(buf, "no device\n");
547 switch (cdev->private->state) {
548 case DEV_STATE_BOXED:
549 return sprintf(buf, "boxed\n");
550 case DEV_STATE_DISCONNECTED:
551 case DEV_STATE_DISCONNECTED_SENSE_ID:
552 case DEV_STATE_NOT_OPER:
553 sch = to_subchannel(dev->parent);
555 return sprintf(buf, "no path\n");
557 return sprintf(buf, "no device\n");
559 /* All other states considered fine. */
560 return sprintf(buf, "good\n");
565 initiate_logging(struct device *dev, struct device_attribute *attr,
566 const char *buf, size_t count)
568 struct subchannel *sch = to_subchannel(dev);
571 rc = chsc_siosl(sch->schid);
573 pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
574 sch->schid.ssid, sch->schid.sch_no, rc);
577 pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
578 sch->schid.ssid, sch->schid.sch_no);
582 static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
585 struct subchannel *sch = to_subchannel(dev);
587 return sprintf(buf, "%02x\n", sch->vpm);
590 static DEVICE_ATTR_RO(devtype);
591 static DEVICE_ATTR_RO(cutype);
592 static DEVICE_ATTR_RO(modalias);
593 static DEVICE_ATTR_RW(online);
594 static DEVICE_ATTR(availability, 0444, available_show, NULL);
595 static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
596 static DEVICE_ATTR_RO(vpm);
598 static struct attribute *io_subchannel_attrs[] = {
599 &dev_attr_logging.attr,
604 static const struct attribute_group io_subchannel_attr_group = {
605 .attrs = io_subchannel_attrs,
608 static struct attribute * ccwdev_attrs[] = {
609 &dev_attr_devtype.attr,
610 &dev_attr_cutype.attr,
611 &dev_attr_modalias.attr,
612 &dev_attr_online.attr,
613 &dev_attr_cmb_enable.attr,
614 &dev_attr_availability.attr,
618 static const struct attribute_group ccwdev_attr_group = {
619 .attrs = ccwdev_attrs,
622 static const struct attribute_group *ccwdev_attr_groups[] = {
627 static int match_dev_id(struct device *dev, const void *data)
629 struct ccw_device *cdev = to_ccwdev(dev);
630 struct ccw_dev_id *dev_id = (void *)data;
632 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
636 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
637 * @dev_id: id of the device to be searched
639 * This function searches all devices attached to the ccw bus for a device
642 * If a device is found, its reference count is increased and the device is
643 * returned; else %NULL is returned.
645 struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
649 dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
651 return dev ? to_ccwdev(dev) : NULL;
653 EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
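/*
 * Usage sketch (hypothetical caller): look up a device by ssid/devno and
 * drop the acquired reference when done.
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
 *
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */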
655 static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
659 mutex_lock(&cdev->reg_mutex);
660 if (device_is_registered(&cdev->dev)) {
661 device_release_driver(&cdev->dev);
662 ret = device_attach(&cdev->dev);
663 WARN_ON(ret == -ENODEV);
665 mutex_unlock(&cdev->reg_mutex);
669 ccw_device_release(struct device *dev)
671 struct ccw_device *cdev;
673 cdev = to_ccwdev(dev);
674 cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
675 sizeof(*cdev->private->dma_area));
676 cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
677 /* Release reference of parent subchannel. */
678 put_device(cdev->dev.parent);
679 kfree(cdev->private);
683 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
685 struct ccw_device *cdev;
686 struct gen_pool *dma_pool;
689 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
694 cdev->private = kzalloc(sizeof(struct ccw_device_private),
695 GFP_KERNEL | GFP_DMA);
696 if (!cdev->private) {
701 cdev->dev.dma_mask = sch->dev.dma_mask;
702 ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
704 goto err_coherent_mask;
706 dma_pool = cio_gp_dma_create(&cdev->dev, 1);
711 cdev->private->dma_pool = dma_pool;
712 cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
713 sizeof(*cdev->private->dma_area));
714 if (!cdev->private->dma_area) {
720 cio_gp_dma_destroy(dma_pool, &cdev->dev);
723 kfree(cdev->private);
730 static void ccw_device_todo(struct work_struct *work);
732 static int io_subchannel_initialize_dev(struct subchannel *sch,
733 struct ccw_device *cdev)
735 struct ccw_device_private *priv = cdev->private;
739 priv->int_class = IRQIO_CIO;
740 priv->state = DEV_STATE_NOT_OPER;
741 priv->dev_id.devno = sch->schib.pmcw.dev;
742 priv->dev_id.ssid = sch->schid.ssid;
744 INIT_WORK(&priv->todo_work, ccw_device_todo);
745 INIT_LIST_HEAD(&priv->cmb_list);
746 init_waitqueue_head(&priv->wait_q);
747 timer_setup(&priv->timer, ccw_device_timeout, 0);
748 mutex_init(&cdev->reg_mutex);
750 atomic_set(&priv->onoff, 0);
751 cdev->ccwlock = sch->lock;
752 cdev->dev.parent = &sch->dev;
753 cdev->dev.release = ccw_device_release;
754 cdev->dev.bus = &ccw_bus_type;
755 cdev->dev.groups = ccwdev_attr_groups;
756 /* Do first half of device_register. */
757 device_initialize(&cdev->dev);
758 ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
759 cdev->private->dev_id.devno);
762 if (!get_device(&sch->dev)) {
766 priv->flags.initialized = 1;
767 spin_lock_irq(sch->lock);
768 sch_set_cdev(sch, cdev);
769 spin_unlock_irq(sch->lock);
773 /* Release reference from device_initialize(). */
774 put_device(&cdev->dev);
778 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
780 struct ccw_device *cdev;
783 cdev = io_subchannel_allocate_dev(sch);
785 ret = io_subchannel_initialize_dev(sch, cdev);
792 static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
794 static void sch_create_and_recog_new_device(struct subchannel *sch)
796 struct ccw_device *cdev;
798 /* Need to allocate a new ccw device. */
799 cdev = io_subchannel_create_ccwdev(sch);
801 /* OK, we did everything we could... */
802 css_sch_device_unregister(sch);
805 /* Start recognition for the new ccw device. */
806 io_subchannel_recog(cdev, sch);
810 * Register recognized device.
812 static void io_subchannel_register(struct ccw_device *cdev)
814 struct subchannel *sch;
815 int ret, adjust_init_count = 1;
818 sch = to_subchannel(cdev->dev.parent);
820 * Check if subchannel is still registered. It may have become
821 * unregistered if a machine check hit us after finishing
822 * device recognition but before the register work could be
825 if (!device_is_registered(&sch->dev))
827 css_update_ssd_info(sch);
829 * io_subchannel_register() will also be called after device
830 * recognition has been done for a boxed device (which will already
831 * be registered). We need to reprobe since we may now have sense id
834 mutex_lock(&cdev->reg_mutex);
835 if (device_is_registered(&cdev->dev)) {
837 ret = device_reprobe(&cdev->dev);
839 /* We can't do much here. */
840 CIO_MSG_EVENT(0, "device_reprobe() returned"
841 " %d for 0.%x.%04x\n", ret,
842 cdev->private->dev_id.ssid,
843 cdev->private->dev_id.devno);
845 adjust_init_count = 0;
848 /* make it known to the system */
849 ret = device_add(&cdev->dev);
851 CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
852 cdev->private->dev_id.ssid,
853 cdev->private->dev_id.devno, ret);
854 spin_lock_irqsave(sch->lock, flags);
855 sch_set_cdev(sch, NULL);
856 spin_unlock_irqrestore(sch->lock, flags);
857 mutex_unlock(&cdev->reg_mutex);
858 /* Release initial device reference. */
859 put_device(&cdev->dev);
863 cdev->private->flags.recog_done = 1;
864 mutex_unlock(&cdev->reg_mutex);
865 wake_up(&cdev->private->wait_q);
867 if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
868 wake_up(&ccw_device_init_wq);
872 * subchannel recognition done. Called from the state machine.
875 io_subchannel_recog_done(struct ccw_device *cdev)
877 if (css_init_done == 0) {
878 cdev->private->flags.recog_done = 1;
881 switch (cdev->private->state) {
882 case DEV_STATE_BOXED:
883 /* Device did not respond in time. */
884 case DEV_STATE_NOT_OPER:
885 cdev->private->flags.recog_done = 1;
886 /* Remove device found not operational. */
887 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
888 if (atomic_dec_and_test(&ccw_device_init_count))
889 wake_up(&ccw_device_init_wq);
891 case DEV_STATE_OFFLINE:
893 * We can't register the device in interrupt context so
894 * we schedule a work item.
896 ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
901 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
903 /* Increase counter of devices currently in recognition. */
904 atomic_inc(&ccw_device_init_count);
906 /* Start async. device sensing. */
907 spin_lock_irq(sch->lock);
908 ccw_device_recognition(cdev);
909 spin_unlock_irq(sch->lock);
912 static int ccw_device_move_to_sch(struct ccw_device *cdev,
913 struct subchannel *sch)
915 struct subchannel *old_sch;
916 int rc, old_enabled = 0;
918 old_sch = to_subchannel(cdev->dev.parent);
919 /* Obtain child reference for new parent. */
920 if (!get_device(&sch->dev))
923 if (!sch_is_pseudo_sch(old_sch)) {
924 spin_lock_irq(old_sch->lock);
925 old_enabled = old_sch->schib.pmcw.ena;
928 rc = cio_disable_subchannel(old_sch);
929 spin_unlock_irq(old_sch->lock);
931 /* Release child reference for new parent. */
932 put_device(&sch->dev);
937 mutex_lock(&sch->reg_mutex);
938 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
939 mutex_unlock(&sch->reg_mutex);
941 CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
942 cdev->private->dev_id.ssid,
943 cdev->private->dev_id.devno, sch->schid.ssid,
944 sch->schib.pmcw.dev, rc);
946 /* Try to reenable the old subchannel. */
947 spin_lock_irq(old_sch->lock);
948 cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
949 spin_unlock_irq(old_sch->lock);
951 /* Release child reference for new parent. */
952 put_device(&sch->dev);
955 /* Clean up old subchannel. */
956 if (!sch_is_pseudo_sch(old_sch)) {
957 spin_lock_irq(old_sch->lock);
958 sch_set_cdev(old_sch, NULL);
959 spin_unlock_irq(old_sch->lock);
960 css_schedule_eval(old_sch->schid);
962 /* Release child reference for old parent. */
963 put_device(&old_sch->dev);
964 /* Initialize new subchannel. */
965 spin_lock_irq(sch->lock);
966 cdev->ccwlock = sch->lock;
967 if (!sch_is_pseudo_sch(sch))
968 sch_set_cdev(sch, cdev);
969 spin_unlock_irq(sch->lock);
970 if (!sch_is_pseudo_sch(sch))
971 css_update_ssd_info(sch);
975 static int ccw_device_move_to_orph(struct ccw_device *cdev)
977 struct subchannel *sch = to_subchannel(cdev->dev.parent);
978 struct channel_subsystem *css = to_css(sch->dev.parent);
980 return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
983 static void io_subchannel_irq(struct subchannel *sch)
985 struct ccw_device *cdev;
987 cdev = sch_get_cdev(sch);
989 CIO_TRACE_EVENT(6, "IRQ");
990 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
992 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
994 inc_irq_stat(IRQIO_CIO);
997 void io_subchannel_init_config(struct subchannel *sch)
999 memset(&sch->config, 0, sizeof(sch->config));
1000 sch->config.csense = 1;
1003 static void io_subchannel_init_fields(struct subchannel *sch)
1005 if (cio_is_console(sch->schid))
1008 sch->opm = chp_get_sch_opm(sch);
1009 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1010 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1012 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1013 " - PIM = %02X, PAM = %02X, POM = %02X\n",
1014 sch->schib.pmcw.dev, sch->schid.ssid,
1015 sch->schid.sch_no, sch->schib.pmcw.pim,
1016 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1018 io_subchannel_init_config(sch);
1022 * Note: We always return 0 so that we bind to the device even on error.
1023 * This is needed so that our remove function is called on unregister.
1025 static int io_subchannel_probe(struct subchannel *sch)
1027 struct io_subchannel_private *io_priv;
1028 struct ccw_device *cdev;
1031 if (cio_is_console(sch->schid)) {
1032 rc = sysfs_create_group(&sch->dev.kobj,
1033 &io_subchannel_attr_group);
1035 CIO_MSG_EVENT(0, "Failed to create io subchannel "
1036 "attributes for subchannel "
1037 "0.%x.%04x (rc=%d)\n",
1038 sch->schid.ssid, sch->schid.sch_no, rc);
1040 * The console subchannel already has an associated ccw_device.
1041 * Register it and exit.
1043 cdev = sch_get_cdev(sch);
1044 rc = device_add(&cdev->dev);
1046 /* Release online reference. */
1047 put_device(&cdev->dev);
1050 if (atomic_dec_and_test(&ccw_device_init_count))
1051 wake_up(&ccw_device_init_wq);
1054 io_subchannel_init_fields(sch);
1055 rc = cio_commit_config(sch);
1058 rc = sysfs_create_group(&sch->dev.kobj,
1059 &io_subchannel_attr_group);
1062 /* Allocate I/O subchannel private data. */
1063 io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1067 io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1068 sizeof(*io_priv->dma_area),
1069 &io_priv->dma_area_dma, GFP_KERNEL);
1070 if (!io_priv->dma_area) {
1075 set_io_private(sch, io_priv);
1076 css_schedule_eval(sch->schid);
1080 spin_lock_irq(sch->lock);
1081 css_sched_sch_todo(sch, SCH_TODO_UNREG);
1082 spin_unlock_irq(sch->lock);
1086 static void io_subchannel_remove(struct subchannel *sch)
1088 struct io_subchannel_private *io_priv = to_io_private(sch);
1089 struct ccw_device *cdev;
1091 cdev = sch_get_cdev(sch);
1095 ccw_device_unregister(cdev);
1096 spin_lock_irq(sch->lock);
1097 sch_set_cdev(sch, NULL);
1098 set_io_private(sch, NULL);
1099 spin_unlock_irq(sch->lock);
1101 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1102 io_priv->dma_area, io_priv->dma_area_dma);
1104 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1107 static void io_subchannel_verify(struct subchannel *sch)
1109 struct ccw_device *cdev;
1111 cdev = sch_get_cdev(sch);
1113 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1116 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1118 struct ccw_device *cdev;
1120 cdev = sch_get_cdev(sch);
1123 if (cio_update_schib(sch))
1125 /* Check for I/O on path. */
1126 if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1128 if (cdev->private->state == DEV_STATE_ONLINE) {
1129 ccw_device_kill_io(cdev);
1135 /* Trigger path verification. */
1136 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1140 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1143 static int io_subchannel_chp_event(struct subchannel *sch,
1144 struct chp_link *link, int event)
1146 struct ccw_device *cdev = sch_get_cdev(sch);
1147 int mask, chpid, valid_bit;
1150 mask = chp_ssd_get_mask(&sch->ssd_info, link);
1158 cdev->private->path_gone_mask |= mask;
1159 io_subchannel_terminate_path(sch, mask);
1165 cdev->private->path_new_mask |= mask;
1166 io_subchannel_verify(sch);
1169 if (cio_update_schib(sch))
1172 cdev->private->path_gone_mask |= mask;
1173 io_subchannel_terminate_path(sch, mask);
1176 if (cio_update_schib(sch))
1178 sch->lpm |= mask & sch->opm;
1180 cdev->private->path_new_mask |= mask;
1181 io_subchannel_verify(sch);
1183 case CHP_FCES_EVENT:
1184 /* Forward Endpoint Security event */
1185 for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
1187 if (mask & valid_bit)
1188 path_event[chpid] = PE_PATH_FCES_EVENT;
1190 path_event[chpid] = PE_NONE;
1192 if (cdev && cdev->drv && cdev->drv->path_event)
1193 cdev->drv->path_event(cdev, path_event);
1199 static void io_subchannel_quiesce(struct subchannel *sch)
1201 struct ccw_device *cdev;
1204 spin_lock_irq(sch->lock);
1205 cdev = sch_get_cdev(sch);
1206 if (cio_is_console(sch->schid))
1208 if (!sch->schib.pmcw.ena)
1210 ret = cio_disable_subchannel(sch);
1214 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1215 while (ret == -EBUSY) {
1216 cdev->private->state = DEV_STATE_QUIESCE;
1217 cdev->private->iretry = 255;
1218 ret = ccw_device_cancel_halt_clear(cdev);
1219 if (ret == -EBUSY) {
1220 ccw_device_set_timeout(cdev, HZ/10);
1221 spin_unlock_irq(sch->lock);
1222 wait_event(cdev->private->wait_q,
1223 cdev->private->state != DEV_STATE_QUIESCE);
1224 spin_lock_irq(sch->lock);
1226 ret = cio_disable_subchannel(sch);
1229 spin_unlock_irq(sch->lock);
1232 static void io_subchannel_shutdown(struct subchannel *sch)
1234 io_subchannel_quiesce(sch);
1237 static int device_is_disconnected(struct ccw_device *cdev)
1241 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1242 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1245 static int recovery_check(struct device *dev, void *data)
1247 struct ccw_device *cdev = to_ccwdev(dev);
1248 struct subchannel *sch;
1251 spin_lock_irq(cdev->ccwlock);
1252 switch (cdev->private->state) {
1253 case DEV_STATE_ONLINE:
1254 sch = to_subchannel(cdev->dev.parent);
1255 if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
1258 case DEV_STATE_DISCONNECTED:
1259 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1260 cdev->private->dev_id.ssid,
1261 cdev->private->dev_id.devno);
1262 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1265 case DEV_STATE_DISCONNECTED_SENSE_ID:
1269 spin_unlock_irq(cdev->ccwlock);
1274 static void recovery_work_func(struct work_struct *unused)
1278 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1280 spin_lock_irq(&recovery_lock);
1281 if (!timer_pending(&recovery_timer)) {
1282 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1284 mod_timer(&recovery_timer, jiffies +
1285 recovery_delay[recovery_phase] * HZ);
1287 spin_unlock_irq(&recovery_lock);
1289 CIO_MSG_EVENT(3, "recovery: end\n");
1292 static DECLARE_WORK(recovery_work, recovery_work_func);
1294 static void recovery_func(struct timer_list *unused)
1297 * We can't do our recovery in softirq context and it's not
1298 * performance critical, so we schedule it.
1300 schedule_work(&recovery_work);
1303 void ccw_device_schedule_recovery(void)
1305 unsigned long flags;
1307 CIO_MSG_EVENT(3, "recovery: schedule\n");
1308 spin_lock_irqsave(&recovery_lock, flags);
1309 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1311 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1313 spin_unlock_irqrestore(&recovery_lock, flags);
1316 static int purge_fn(struct device *dev, void *data)
1318 struct ccw_device *cdev = to_ccwdev(dev);
1319 struct ccw_dev_id *id = &cdev->private->dev_id;
1320 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1322 spin_lock_irq(cdev->ccwlock);
1323 if (is_blacklisted(id->ssid, id->devno) &&
1324 (cdev->private->state == DEV_STATE_OFFLINE) &&
1325 (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
1326 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1328 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1329 css_sched_sch_todo(sch, SCH_TODO_UNREG);
1330 atomic_set(&cdev->private->onoff, 0);
1332 spin_unlock_irq(cdev->ccwlock);
1333 /* Abort loop in case of pending signal. */
1334 if (signal_pending(current))
1341 * ccw_purge_blacklisted - purge unused, blacklisted devices
1343 * Unregister all ccw devices that are offline and on the blacklist.
1345 int ccw_purge_blacklisted(void)
1347 CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1348 bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1352 void ccw_device_set_disconnected(struct ccw_device *cdev)
1356 ccw_device_set_timeout(cdev, 0);
1357 cdev->private->flags.fake_irb = 0;
1358 cdev->private->state = DEV_STATE_DISCONNECTED;
1360 ccw_device_schedule_recovery();
1363 void ccw_device_set_notoper(struct ccw_device *cdev)
1365 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1367 CIO_TRACE_EVENT(2, "notoper");
1368 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
1369 ccw_device_set_timeout(cdev, 0);
1370 cio_disable_subchannel(sch);
1371 cdev->private->state = DEV_STATE_NOT_OPER;
1374 enum io_sch_action {
1378 IO_SCH_UNREG_ATTACH,
1386 static enum io_sch_action sch_get_action(struct subchannel *sch)
1388 struct ccw_device *cdev;
1390 cdev = sch_get_cdev(sch);
1391 if (cio_update_schib(sch)) {
1392 /* Not operational. */
1394 return IO_SCH_UNREG;
1395 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1396 return IO_SCH_UNREG;
1397 return IO_SCH_ORPH_UNREG;
1401 return IO_SCH_ATTACH;
1402 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1403 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1404 return IO_SCH_UNREG_ATTACH;
1405 return IO_SCH_ORPH_ATTACH;
1407 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1408 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
1409 return IO_SCH_UNREG;
1412 if (device_is_disconnected(cdev))
1413 return IO_SCH_REPROBE;
1415 return IO_SCH_VERIFY;
1416 if (cdev->private->state == DEV_STATE_NOT_OPER)
1417 return IO_SCH_UNREG_ATTACH;
1422 * io_subchannel_sch_event - process subchannel event
1424 * @process: non-zero if function is called in process context
1426 * An unspecified event occurred for this subchannel. Adjust data according
1427 * to the current operational state of the subchannel and device. Return
1428 * zero when the event has been handled sufficiently or -EAGAIN when this
1429 * function should be called again in process context.
1431 static int io_subchannel_sch_event(struct subchannel *sch, int process)
1433 unsigned long flags;
1434 struct ccw_device *cdev;
1435 struct ccw_dev_id dev_id;
1436 enum io_sch_action action;
1439 spin_lock_irqsave(sch->lock, flags);
1440 if (!device_is_registered(&sch->dev))
1442 if (work_pending(&sch->todo_work))
1444 cdev = sch_get_cdev(sch);
1445 if (cdev && work_pending(&cdev->private->todo_work))
1447 action = sch_get_action(sch);
1448 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1449 sch->schid.ssid, sch->schid.sch_no, process,
1451 /* Perform immediate actions while holding the lock. */
1453 case IO_SCH_REPROBE:
1454 /* Trigger device recognition. */
1455 ccw_device_trigger_reprobe(cdev);
1459 /* Trigger path verification. */
1460 io_subchannel_verify(sch);
1464 ccw_device_set_disconnected(cdev);
1467 case IO_SCH_ORPH_UNREG:
1468 case IO_SCH_ORPH_ATTACH:
1469 ccw_device_set_disconnected(cdev);
1471 case IO_SCH_UNREG_ATTACH:
1475 if (cdev->private->state == DEV_STATE_SENSE_ID) {
1477 * Note: delayed work triggered by this event
1478 * and repeated calls to sch_event are synchronized
1479 * by the above check for work_pending(cdev).
1481 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1483 ccw_device_set_notoper(cdev);
1491 spin_unlock_irqrestore(sch->lock, flags);
1492 /* All other actions require process context. */
1495 /* Handle attached ccw device. */
1497 case IO_SCH_ORPH_UNREG:
1498 case IO_SCH_ORPH_ATTACH:
1499 /* Move ccw device to orphanage. */
1500 rc = ccw_device_move_to_orph(cdev);
1504 case IO_SCH_UNREG_ATTACH:
1505 spin_lock_irqsave(sch->lock, flags);
1506 sch_set_cdev(sch, NULL);
1507 spin_unlock_irqrestore(sch->lock, flags);
1508 /* Unregister ccw device. */
1509 ccw_device_unregister(cdev);
1514 /* Handle subchannel. */
1516 case IO_SCH_ORPH_UNREG:
1518 css_sch_device_unregister(sch);
1520 case IO_SCH_ORPH_ATTACH:
1521 case IO_SCH_UNREG_ATTACH:
1523 dev_id.ssid = sch->schid.ssid;
1524 dev_id.devno = sch->schib.pmcw.dev;
1525 cdev = get_ccwdev_by_dev_id(&dev_id);
1527 sch_create_and_recog_new_device(sch);
1530 rc = ccw_device_move_to_sch(cdev, sch);
1532 /* Release reference from get_ccwdev_by_dev_id() */
1533 put_device(&cdev->dev);
1536 spin_lock_irqsave(sch->lock, flags);
1537 ccw_device_trigger_reprobe(cdev);
1538 spin_unlock_irqrestore(sch->lock, flags);
1539 /* Release reference from get_ccwdev_by_dev_id() */
1540 put_device(&cdev->dev);
1548 spin_unlock_irqrestore(sch->lock, flags);
1553 static void ccw_device_set_int_class(struct ccw_device *cdev)
1555 struct ccw_driver *cdrv = cdev->drv;
1557 /* Note: we interpret class 0 in this context as an uninitialized
1558 * field since it translates to a non-I/O interrupt class. */
1559 if (cdrv->int_class != 0)
1560 cdev->private->int_class = cdrv->int_class;
1562 cdev->private->int_class = IRQIO_CIO;
1565 #ifdef CONFIG_CCW_CONSOLE
1566 int __init ccw_device_enable_console(struct ccw_device *cdev)
1568 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1571 if (!cdev->drv || !cdev->handler)
1574 io_subchannel_init_fields(sch);
1575 rc = cio_commit_config(sch);
1578 sch->driver = &io_subchannel_driver;
1579 io_subchannel_recog(cdev, sch);
1580 /* Now wait for the async. recognition to come to an end. */
1581 spin_lock_irq(cdev->ccwlock);
1582 while (!dev_fsm_final_state(cdev))
1583 ccw_device_wait_idle(cdev);
1585 /* Hold on to an extra reference while device is online. */
1586 get_device(&cdev->dev);
1587 rc = ccw_device_online(cdev);
1591 while (!dev_fsm_final_state(cdev))
1592 ccw_device_wait_idle(cdev);
1594 if (cdev->private->state == DEV_STATE_ONLINE)
1599 spin_unlock_irq(cdev->ccwlock);
1600 if (rc) /* Give up online reference since onlining failed. */
1601 put_device(&cdev->dev);
1605 struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
1607 struct io_subchannel_private *io_priv;
1608 struct ccw_device *cdev;
1609 struct subchannel *sch;
1611 sch = cio_probe_console();
1613 return ERR_CAST(sch);
1615 io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1618 io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1619 sizeof(*io_priv->dma_area),
1620 &io_priv->dma_area_dma, GFP_KERNEL);
1621 if (!io_priv->dma_area)
1623 set_io_private(sch, io_priv);
1624 cdev = io_subchannel_create_ccwdev(sch);
1626 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1627 io_priv->dma_area, io_priv->dma_area_dma);
1628 set_io_private(sch, NULL);
1629 put_device(&sch->dev);
1634 ccw_device_set_int_class(cdev);
1640 put_device(&sch->dev);
1641 return ERR_PTR(-ENOMEM);
1644 void __init ccw_device_destroy_console(struct ccw_device *cdev)
1646 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1647 struct io_subchannel_private *io_priv = to_io_private(sch);
1649 set_io_private(sch, NULL);
1650 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1651 io_priv->dma_area, io_priv->dma_area_dma);
1652 put_device(&sch->dev);
1653 put_device(&cdev->dev);
1658 * ccw_device_wait_idle() - busy wait for device to become idle
1661 * Poll until activity control is zero, that is, no function or data
1662 * transfer is pending/active.
1663 * Called with device lock being held.
1665 void ccw_device_wait_idle(struct ccw_device *cdev)
1667 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1671 if (sch->schib.scsw.cmd.actl == 0)
1679 * get_ccwdev_by_busid() - obtain device from a bus id
1680 * @cdrv: driver the device is owned by
1681 * @bus_id: bus id of the device to be searched
1683 * This function searches all devices owned by @cdrv for a device with a bus
1684 * id matching @bus_id.
1686 * If a match is found, the reference count of the found device is increased
1687 * and the device is returned; else %NULL is returned.
1689 struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1694 dev = driver_find_device_by_name(&cdrv->driver, bus_id);
1696 return dev ? to_ccwdev(dev) : NULL;
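/*
 * Usage sketch (hypothetical caller): look up one of a driver's devices by
 * bus id and release the obtained reference afterwards.
 *
 *	struct ccw_device *cdev = get_ccwdev_by_busid(&foo_driver, "0.0.1234");
 *
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */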
1699 /************************** device driver handling ************************/
1701 /* This is the implementation of the ccw_driver class. The probe, remove
1702 * and release methods are initially very similar to the device_driver
1703 * implementations, with the difference that they have ccw_device
1706 * A ccw driver also contains the information that is needed for
1710 ccw_device_probe (struct device *dev)
1712 struct ccw_device *cdev = to_ccwdev(dev);
1713 struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1716 cdev->drv = cdrv; /* to let the driver call _set_online */
1717 ccw_device_set_int_class(cdev);
1718 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1721 cdev->private->int_class = IRQIO_CIO;
1728 static void ccw_device_remove(struct device *dev)
1730 struct ccw_device *cdev = to_ccwdev(dev);
1731 struct ccw_driver *cdrv = cdev->drv;
1732 struct subchannel *sch;
1738 spin_lock_irq(cdev->ccwlock);
1741 ret = ccw_device_offline(cdev);
1742 spin_unlock_irq(cdev->ccwlock);
1744 wait_event(cdev->private->wait_q,
1745 dev_fsm_final_state(cdev));
1747 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
1748 "device 0.%x.%04x\n",
1749 ret, cdev->private->dev_id.ssid,
1750 cdev->private->dev_id.devno);
1751 /* Give up reference obtained in ccw_device_set_online(). */
1752 put_device(&cdev->dev);
1753 spin_lock_irq(cdev->ccwlock);
1755 ccw_device_set_timeout(cdev, 0);
1757 cdev->private->int_class = IRQIO_CIO;
1758 sch = to_subchannel(cdev->dev.parent);
1759 spin_unlock_irq(cdev->ccwlock);
1760 io_subchannel_quiesce(sch);
1761 __disable_cmf(cdev);
1764 static void ccw_device_shutdown(struct device *dev)
1766 struct ccw_device *cdev;
1768 cdev = to_ccwdev(dev);
1769 if (cdev->drv && cdev->drv->shutdown)
1770 cdev->drv->shutdown(cdev);
1771 __disable_cmf(cdev);
1774 static struct bus_type ccw_bus_type = {
1776 .match = ccw_bus_match,
1777 .uevent = ccw_uevent,
1778 .probe = ccw_device_probe,
1779 .remove = ccw_device_remove,
1780 .shutdown = ccw_device_shutdown,
1784 * ccw_driver_register() - register a ccw driver
1785 * @cdriver: driver to be registered
1787 * This function is mainly a wrapper around driver_register().
1789 * %0 on success and a negative error value on failure.
1791 int ccw_driver_register(struct ccw_driver *cdriver)
1793 struct device_driver *drv = &cdriver->driver;
1795 drv->bus = &ccw_bus_type;
1797 return driver_register(drv);
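/*
 * Usage sketch (hypothetical driver): match on a control unit type and
 * register with the ccw bus. The "foo" names are illustrative only.
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.ids		= foo_ids,
 *		.set_online	= foo_set_online,
 *		.set_offline	= foo_set_offline,
 *	};
 *
 *	ret = ccw_driver_register(&foo_driver);
 */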
1801 * ccw_driver_unregister() - deregister a ccw driver
1802 * @cdriver: driver to be deregistered
1804 * This function is mainly a wrapper around driver_unregister().
1806 void ccw_driver_unregister(struct ccw_driver *cdriver)
1808 driver_unregister(&cdriver->driver);
1811 static void ccw_device_todo(struct work_struct *work)
1813 struct ccw_device_private *priv;
1814 struct ccw_device *cdev;
1815 struct subchannel *sch;
1816 enum cdev_todo todo;
1818 priv = container_of(work, struct ccw_device_private, todo_work);
1820 sch = to_subchannel(cdev->dev.parent);
1821 /* Find out todo. */
1822 spin_lock_irq(cdev->ccwlock);
1824 priv->todo = CDEV_TODO_NOTHING;
1825 CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
1826 priv->dev_id.ssid, priv->dev_id.devno, todo);
1827 spin_unlock_irq(cdev->ccwlock);
1830 case CDEV_TODO_ENABLE_CMF:
1833 case CDEV_TODO_REBIND:
1834 ccw_device_do_unbind_bind(cdev);
1836 case CDEV_TODO_REGISTER:
1837 io_subchannel_register(cdev);
1839 case CDEV_TODO_UNREG_EVAL:
1840 if (!sch_is_pseudo_sch(sch))
1841 css_schedule_eval(sch->schid);
1843 case CDEV_TODO_UNREG:
1844 spin_lock_irq(sch->lock);
1845 sch_set_cdev(sch, NULL);
1846 spin_unlock_irq(sch->lock);
1847 ccw_device_unregister(cdev);
1852 /* Release workqueue ref. */
1853 put_device(&cdev->dev);
1857 * ccw_device_sched_todo - schedule ccw device operation
1861 * Schedule the operation identified by @todo to be performed on the slow path
1862 * workqueue. Do nothing if another operation with higher priority is already
1863 * scheduled. Needs to be called with ccwdev lock held.
1865 void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
1867 CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
1868 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
1870 if (cdev->private->todo >= todo)
1872 cdev->private->todo = todo;
1873 /* Get workqueue ref. */
1874 if (!get_device(&cdev->dev))
1876 if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
1877 /* Already queued, release workqueue ref. */
1878 put_device(&cdev->dev);
1883 * ccw_device_siosl() - initiate logging
1886 * This function is used to invoke model-dependent logging within the channel
1889 int ccw_device_siosl(struct ccw_device *cdev)
1891 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1893 return chsc_siosl(sch->schid);
1895 EXPORT_SYMBOL_GPL(ccw_device_siosl);
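/*
 * Usage sketch (hypothetical caller): request a model-dependent log entry
 * for the device, e.g. from a driver's error handling path.
 *
 *	if (ccw_device_siosl(cdev))
 *		dev_warn(&cdev->dev, "logging request failed\n");
 */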
1897 EXPORT_SYMBOL(ccw_device_set_online);
1898 EXPORT_SYMBOL(ccw_device_set_offline);
1899 EXPORT_SYMBOL(ccw_driver_register);
1900 EXPORT_SYMBOL(ccw_driver_unregister);
1901 EXPORT_SYMBOL(get_ccwdev_by_busid);