/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
static void *sei_page;

static int new_channel_path(int chpid);
static void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}
static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}
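/*
 * get_chp_status() returns -ENODEV while no channel_path structure
 * exists for the chpid; otherwise it returns the logical state (1 after
 * new_channel_path(), toggled by set_chp_logically_online()).
 */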
void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}
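/*
 * Path-mask convention used throughout this file (illustrative example):
 * the eight channel paths of a subchannel are tracked in an 8-bit mask
 * with path 0 in the leftmost bit, so path n maps to 0x80 >> n.  For
 * chp = 2 the mask is 0x80 >> 2 = 0x20, and sch->opm &= ~0x20 above
 * disables the third path for operation.
 */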
void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;
	ssd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code	= 0x0004,
	};

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}
	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}
	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch have meaning
	 * 2: Message subchannel: All fields except unit_addr have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out.
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};

		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);
		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}
	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}
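/*
 * Worked example for the copy loop above (values illustrative): for
 * j = 1 the tested bit is 0x80 >> 1 = 0x40.  With path_mask = 0xc0 and
 * fla_valid_mask = 0x40, 0x40 & 0xc0 & 0x40 is non-zero, so chpid[1]
 * and fla[1] are recorded; j = 0 fails the test because fla_valid_mask
 * has bit 0x80 clear.
 */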
int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;

		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == chpid->id)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock_irq(&sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		goto out_unreg;

	if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
				     SCSW_ACTL_HALT_PEND |
				     SCSW_ACTL_START_PEND |
				     SCSW_ACTL_RESUME_PEND)) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc = cio_cancel(sch);

		if (cc == -ENODEV)
			goto out_unreg;

		if (cc == -EINVAL) {
			cc = cio_clear(sch);
			if (cc == -ENODEV)
				goto out_unreg;
			/* Call handler. */
			if (sch->driver && sch->driver->termination)
				sch->driver->termination(&sch->dev);
			goto out_unlock;
		}
	} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
		   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
		   (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock_irq(&sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
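/*
 * The out_unreg exit above queues the subchannel on the slow path:
 * once the chpid in question was the last usable path (or the device
 * vanished altogether), re-evaluation and possibly unregistration have
 * to happen outside this bus walk.
 */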
static void
s390_set_chpid_offline( __u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}
struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};
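/*
 * fla/fla_mask describe how precisely the affected link is known
 * (illustrative example): fla = 0x1234 with fla_mask = 0xffff matches
 * exactly the full link address 0x1234, while fla = 0x1200 with
 * fla_mask = 0xff00 matches any full link address of the form 0x12xx.
 */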
static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and eventually check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}
static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = (struct res_acc_data *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(&sch->lock);
		/* Drop the reference from get_subchannel_by_schid(). */
		put_device(&sch->dev);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
}
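/*
 * Returning -ENODEV when fla_mask == 0xffff is an early-stop signal:
 * a full link address identifies at most one subchannel, so
 * for_each_subchannel() need not keep scanning; s390_process_res_acc()
 * converts any rc other than -EAGAIN back to 0.
 */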
static int
s390_process_res_acc (struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir*) data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}
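/*
 * Worked example for the checks above (value illustrative): with
 * indesc[0] = 0xd1000047, both validity bits (0xc0000000) are set and
 * the node-type bit (0x10000000) is set, so the record is accepted and
 * the returned chpid is the low byte, 0x47.
 */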
int
chsc_process_crw(void)
{
	int chpid, ret;
	struct res_acc_data res_data;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;
	if (!sei_page)
		return 0;
	/*
	 * build the chsc request block for store event information
	 * and do the call
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		struct device *dev;

		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
		sei_area->request = (struct chsc_header) {
			.length = 0x0010,
			.code   = 0x000e,
		};

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;
		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
				      "successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}
		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				      "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}
		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident*/
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;
		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei:\n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				break;
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xffff;
				} else {
					pr_debug(" link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xff00;
				}
			}
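			/*
			 * As the checks above read: both top bits of vf set
			 * means the full link address is valid and has to
			 * match exactly (fla_mask = 0xffff); only one bit set
			 * means just the link address (high byte) is valid,
			 * so e.g. fla = 0x1234 with fla_mask = 0xff00 matches
			 * any full link address 0x12xx.
			 */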
			ret = s390_process_res_acc(&res_data);
			pr_debug("\n\n");
			put_device(dev);
			break;
		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}
static int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = (struct channel_path *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(&sch->lock);
	for (i = 0; i < 8; i++)
		if (sch->schib.pmcw.chpid[i] == chp->id) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(&sch->lock);
				return -ENXIO;
			}
			break;
		}
	if (i == 8) {
		spin_unlock_irq(&sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | 0x80 >> i) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return 0;
}
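/*
 * Worked example for the lpm recalculation above (values illustrative):
 * with pim = 0xc0, pam = 0x80, pom = 0xff, i = 1 and opm = 0xff, the
 * installed-and-available mask is 0x80; OR-ing in the newly usable path
 * bit 0x80 >> 1 = 0x40 and masking with opm yields lpm = 0xc0.
 */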
static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine.*/
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}
static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}
static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid( __u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}
/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}
static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
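/*
 * Usage sketch (path is illustrative): with this attribute a channel
 * path can be varied from user space, e.g.
 *	echo off > /sys/devices/css0/chp0.47/status
 * which reaches chp_status_write() and thus s390_vary_chpid(0x47, 0).
 */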
static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};
static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}
static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0002,
	};

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;
	memset(chp, 0, sizeof(struct channel_path));

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css[0]->device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	}
	css[0]->chps[chpid] = chp;
	return ret;
out_free:
	kfree(chp);
	return ret;
}
void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}
static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}
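/*
 * The page allocated here lives for the lifetime of the system;
 * chsc_process_crw() is its only user (the machine check handler
 * thread), which is why no locking is needed for sei_page.
 */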
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request = (struct chsc_header) {
		.length = 0x0400,
		.code = 0x0031,
	};
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}
subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
static int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0010,
	};

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}

	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

subsys_initcall(chsc_determine_css_characteristics);
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);