// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (!ret && schid.ssid++ < max_ssid);
	return ret;
}

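/*
 * Illustrative sketch, not part of the original driver: a minimal
 * callback for for_each_subchannel(). The iterator invokes it once per
 * subchannel ID across all subchannel sets and stops as soon as the
 * callback returns non-zero; the opaque data pointer is passed through
 * unchanged, here used to count the IDs visited.
 */
static int __maybe_unused example_count_schid(struct subchannel_id schid,
					      void *data)
{
	unsigned long *count = data;

	(*count)++;
	return 0;	/* continue the iteration */
}
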
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}
	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

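/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * caller of for_each_subchannel_staged(). Passing a NULL fn_unknown
 * takes the known-only fast path above, which skips the idset
 * allocation and only walks devices registered on the css bus.
 */
static int __maybe_unused example_touch_known(struct subchannel *sch,
					      void *data)
{
	CIO_MSG_EVENT(6, "visited sch 0.%x.%04x\n",
		      sch->schid.ssid, sch->schid.sch_no);
	return 0;
}

static void __maybe_unused example_staged_walk(void)
{
	for_each_subchannel_staged(example_touch_known, NULL, NULL);
}
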
static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

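/*
 * Illustrative sketch, not part of the original driver:
 * get_subchannel_by_schid() returns its result with a device reference
 * held, so every successful lookup must be balanced by a put_device().
 */
static void __maybe_unused example_lookup(struct subchannel_id schid)
{
	struct subchannel *sch = get_subchannel_by_schid(schid);

	if (!sch)
		return;
	/* ... inspect sch while holding the reference ... */
	put_device(&sch->dev);
}
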
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

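/*
 * Illustrative sketch, not part of the original driver: as documented
 * above, css_sched_sch_todo() must be called with the subchannel lock
 * held, so a hypothetical caller requesting unregistration would look
 * like this.
 */
static void __maybe_unused example_request_unreg(struct subchannel *sch)
{
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
}
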
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while.  */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
		idset_sch_del(set, sch->schid);

	return 0;
}

void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Find unregistered subchannels. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_UNREG:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

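/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * caller queueing a full reprobe. Note that the evaluation above is
 * queued with a delay, so css_wait_for_slow_path() here only flushes
 * work that has already made it onto cio_work_q.
 */
static void __maybe_unused example_rescan_sync(void)
{
	css_schedule_reprobe();
	css_wait_for_slow_path();
}
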
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

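/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * driver creating and tearing down a private gen_pool backed by two
 * coherent DMA pages, mirroring what cio_dma_pool_init() does below for
 * the css-global pool.
 */
static void __maybe_unused example_private_pool(struct device *dev)
{
	struct gen_pool *pool = cio_gp_dma_create(dev, 2);

	if (!pool)
		return;
	/* ... allocate via cio_gp_dma_zalloc(pool, dev, size) ... */
	cio_gp_dma_destroy(pool, dev);
}
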
static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					 chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

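/*
 * Illustrative sketch, not part of the original driver: a typical
 * allocate/free round trip against the css-global DMA pool. The size
 * passed to cio_dma_free() must match the allocation size.
 */
static void __maybe_unused example_dma_roundtrip(void)
{
	void *buf = cio_dma_zalloc(64);

	if (!buf)
		return;
	/* ... use buf for a DMA-capable control block ... */
	cio_dma_free(buf, 64);
}
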
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open = nonseekable_open,
	.proc_write = cio_settle_write,
	.proc_lseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static void css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->remove)
		sch->driver->remove(sch);
	sch->driver = NULL;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

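/*
 * Illustrative sketch, not part of the original driver: the minimal
 * boilerplate a hypothetical subchannel-type driver supplies before
 * calling css_driver_register(). The ID entry matches I/O subchannels.
 */
static struct css_device_id example_subchannel_ids[] __maybe_unused = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static struct css_driver example_css_driver __maybe_unused = {
	.subchannel_type = example_subchannel_ids,
	.drv = {
		.name  = "example_css_drv",
		.owner = THIS_MODULE,
	},
};
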
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);