// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 * Based on.......: linux/drivers/s390/block/mdisk.c
 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2000
 *
 */

#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/asm-extable.h>
#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>

#include "dasd_int.h"
#include "dasd_diag.h"

MODULE_LICENSE("GPL");

/* The maximum number of blocks per request (max_blocks) is dependent on the
 * amount of storage that is available in the static I/O buffer for each
 * device. Currently each device gets 2 pages. We want to fit two requests
 * into the available memory so that we can immediately start the next if one
 * finishes. */
#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
                           sizeof(struct dasd_diag_req)) / \
                           sizeof(struct dasd_diag_bio)) / 2)
#define DIAG_MAX_RETRIES        32
#define DIAG_TIMEOUT            50
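/* DIAG_MAX_RETRIES and DIAG_TIMEOUT become the per-device defaults
 * (default_retries and default_expires); the timeout is in seconds and is
 * multiplied by HZ when a request is built in dasd_diag_build_cp(). */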

static struct dasd_discipline dasd_diag_discipline;

struct dasd_diag_private {
        struct dasd_diag_characteristics rdc_data;
        struct dasd_diag_rw_io iob;
        struct dasd_diag_init_io iib;
        blocknum_t pt_block;
        struct ccw_dev_id dev_id;
};

struct dasd_diag_req {
        unsigned int block_count;
        struct dasd_diag_bio bio[];
};

static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */

/* Perform DIAG250 call with block I/O parameter list iob (input and output)
 * and function code cmd.
 * In case of an exception return 3. Otherwise return result of bitwise OR of
 * resulting condition code and DIAG return code. */
static inline int __dia250(void *iob, int cmd)
{
        union register_pair rx = { .even = (unsigned long)iob, };
        typedef union {
                struct dasd_diag_init_io init_io;
                struct dasd_diag_rw_io rw_io;
        } addr_type;
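        /* The addr_type union above only exists for the "+m" constraint in
         * the asm below: it tells the compiler that DIAG 250 may read and
         * modify the whole parameter block, whichever of the two layouts
         * was passed in. */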
        int cc;

        cc = 3;
        asm volatile(
                "       diag    %[rx],%[cmd],0x250\n"
                "0:     ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
                : [cmd] "d" (cmd)
                : "cc");
        return cc | rx.odd;
}

static inline int dia250(void *iob, int cmd)
{
        diag_stat_inc(DIAG_STAT_X250);
        return __dia250(iob, cmd);
}

/* Initialize block I/O to DIAG device using the specified blocksize and
 * block offset. On success, return zero and set end_block to contain the
 * number of blocks on the device minus the specified offset. Return non-zero
 * otherwise. */
static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
             blocknum_t offset, blocknum_t *end_block)
{
        struct dasd_diag_private *private = device->private;
        struct dasd_diag_init_io *iib = &private->iib;
        int rc;

        memset(iib, 0, sizeof (struct dasd_diag_init_io));

        iib->dev_nr = private->dev_id.devno;
        iib->block_size = blocksize;
        iib->offset = offset;
        iib->flaga = DASD_DIAG_FLAGA_DEFAULT;

        rc = dia250(iib, INIT_BIO);

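        /* dia250() folds the condition code into the low bits of its return
         * value; end_block is only taken over when they indicate success. */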
        if ((rc & 3) == 0 && end_block)
                *end_block = iib->end_block;

        return rc;
}

/* Remove block I/O environment for device. Return zero on success, non-zero
 * otherwise. */
static inline int
mdsk_term_io(struct dasd_device * device)
{
        struct dasd_diag_private *private = device->private;
        struct dasd_diag_init_io *iib = &private->iib;
        int rc;

        memset(iib, 0, sizeof (struct dasd_diag_init_io));
        iib->dev_nr = private->dev_id.devno;
        rc = dia250(iib, TERM_BIO);
        return rc;
}

/* Error recovery for failed DIAG requests - try to reestablish the DIAG
 * environment. */
static void
dasd_diag_erp(struct dasd_device *device)
{
        int rc;

        mdsk_term_io(device);
        rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
        if (rc == 4) {
                if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
                        pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
                                dev_name(&device->cdev->dev));
                rc = 0;
        }
        if (rc)
                pr_warn("%s: DIAG ERP failed with rc=%d\n",
                        dev_name(&device->cdev->dev), rc);
}

/* Start a given request at the device. Return zero on success, non-zero
 * otherwise. */
static int
dasd_start_diag(struct dasd_ccw_req * cqr)
{
        struct dasd_device *device;
        struct dasd_diag_private *private;
        struct dasd_diag_req *dreq;
        int rc;

        device = cqr->startdev;
        if (cqr->retries < 0) {
                DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
                            "- no retry left", cqr);
                cqr->status = DASD_CQR_ERROR;
                return -EIO;
        }
        private = device->private;
        dreq = cqr->data;

        private->iob.dev_nr = private->dev_id.devno;
        private->iob.key = 0;
        private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
        private->iob.block_count = dreq->block_count;
        private->iob.interrupt_params = (addr_t) cqr;
        private->iob.bio_list = dreq->bio;
        private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
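        /* Asynchronous execution: completion is signaled by an external
         * interrupt, and CP hands the cqr address stored in interrupt_params
         * back to dasd_ext_handler(). */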

        cqr->startclk = get_tod_clock();
        cqr->starttime = jiffies;
        cqr->retries--;

        rc = dia250(&private->iob, RW_BIO);
        switch (rc) {
        case 0: /* Synchronous I/O finished successfully */
                cqr->stopclk = get_tod_clock();
                cqr->status = DASD_CQR_SUCCESS;
                /* Indicate to calling function that only a dasd_schedule_bh()
                   and no timer is needed */
                rc = -EACCES;
                break;
        case 8: /* Asynchronous I/O was started */
                cqr->status = DASD_CQR_IN_IO;
                rc = 0;
                break;
        default: /* Error condition */
                cqr->status = DASD_CQR_QUEUED;
                DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
                dasd_diag_erp(device);
                rc = -EIO;
                break;
        }
        cqr->intrc = rc;
        return rc;
}

/* Terminate given request at the device. */
static int
dasd_diag_term_IO(struct dasd_ccw_req * cqr)
{
        struct dasd_device *device;

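        /* Stop the request by tearing down and re-establishing the block I/O
         * environment; the external interrupt handler completes the clear
         * once it sees DASD_CQR_CLEAR_PENDING. */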
        device = cqr->startdev;
        mdsk_term_io(device);
        mdsk_init_io(device, device->block->bp_block, 0, NULL);
        cqr->status = DASD_CQR_CLEAR_PENDING;
        cqr->stopclk = get_tod_clock();
        dasd_schedule_device_bh(device);
        return 0;
}

/* Handle external interruption. */
static void dasd_ext_handler(struct ext_code ext_code,
                             unsigned int param32, unsigned long param64)
{
        struct dasd_ccw_req *cqr, *next;
        struct dasd_device *device;
        unsigned long expires;
        unsigned long flags;
        addr_t ip;
        int rc;

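        /* The upper byte of the subcode tells whether CP passed the request
         * address as a 31-bit or a 64-bit interruption parameter. */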
        switch (ext_code.subcode >> 8) {
        case DASD_DIAG_CODE_31BIT:
                ip = (addr_t) param32;
                break;
        case DASD_DIAG_CODE_64BIT:
                ip = (addr_t) param64;
                break;
        default:
                return;
        }
        inc_irq_stat(IRQEXT_DSD);
        if (!ip) {              /* no intparm: unsolicited interrupt */
                DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
                              "interrupt");
                return;
        }
        cqr = (struct dasd_ccw_req *) ip;
        device = (struct dasd_device *) cqr->startdev;
        if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_DEV_EVENT(DBF_WARNING, device,
                            " magic number of dasd_ccw_req 0x%08X doesn't"
                            " match discipline 0x%08X",
                            cqr->magic, *(int *) (&device->discipline->name));
                return;
        }

        /* get irq lock to modify request queue */
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);

        /* Check for a pending clear operation */
        if (cqr->status == DASD_CQR_CLEAR_PENDING) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_device_clear_timer(device);
                dasd_schedule_device_bh(device);
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
                return;
        }

        cqr->stopclk = get_tod_clock();

        expires = 0;
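        /* The low byte of the subcode carries the completion status of the
         * DIAG request; zero means success. */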
        if ((ext_code.subcode & 0xff) == 0) {
                cqr->status = DASD_CQR_SUCCESS;
                /* Start first request on queue if possible -> fast_io. */
                if (!list_empty(&device->ccw_queue)) {
                        next = list_entry(device->ccw_queue.next,
                                          struct dasd_ccw_req, devlist);
                        if (next->status == DASD_CQR_QUEUED) {
                                rc = dasd_start_diag(next);
                                if (rc == 0)
                                        expires = next->expires;
                        }
                }
        } else {
                cqr->status = DASD_CQR_QUEUED;
                DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
                              "request %p was %d (%d retries left)", cqr,
                              ext_code.subcode & 0xff, cqr->retries);
                dasd_diag_erp(device);
        }

        if (expires != 0)
                dasd_device_set_timer(device, expires);
        else
                dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);

        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/* Check whether device can be controlled by DIAG discipline. Return zero on
 * success, non-zero otherwise. */
static int
dasd_diag_check_device(struct dasd_device *device)
{
        struct dasd_diag_private *private = device->private;
        struct dasd_diag_characteristics *rdc_data;
        struct vtoc_cms_label *label;
        struct dasd_block *block;
        struct dasd_diag_bio *bio;
        unsigned int sb, bsize;
        blocknum_t end_block;
        int rc;

        if (private == NULL) {
                private = kzalloc(sizeof(*private), GFP_KERNEL);
                if (private == NULL) {
                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                                "Allocating memory for private DASD data "
                                      "failed\n");
                        return -ENOMEM;
                }
                ccw_device_get_id(device->cdev, &private->dev_id);
                device->private = private;
        }
        block = dasd_alloc_block();
        if (IS_ERR(block)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "could not allocate dasd block structure");
                device->private = NULL;
                kfree(private);
                return PTR_ERR(block);
        }
        device->block = block;
        block->base = device;

        /* Read Device Characteristics */
        rdc_data = &private->rdc_data;
        rdc_data->dev_nr = private->dev_id.devno;
        rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);

        rc = diag210((struct diag210 *) rdc_data);
        if (rc) {
                DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
                            "information (rc=%d)", rc);
                rc = -EOPNOTSUPP;
                goto out;
        }

        device->default_expires = DIAG_TIMEOUT;
        device->default_retries = DIAG_MAX_RETRIES;

        /* Figure out position of label block */
        switch (private->rdc_data.vdev_class) {
        case DEV_CLASS_FBA:
                private->pt_block = 1;
                break;
        case DEV_CLASS_ECKD:
                private->pt_block = 2;
                break;
        default:
                pr_warn("%s: Device type %d is not supported in DIAG mode\n",
                        dev_name(&device->cdev->dev),
                        private->rdc_data.vdev_class);
                rc = -EOPNOTSUPP;
                goto out;
        }

        DBF_DEV_EVENT(DBF_INFO, device,
                      "%04X: %04X on real %04X/%02X",
                      rdc_data->dev_nr,
                      rdc_data->vdev_type,
                      rdc_data->rdev_type, rdc_data->rdev_model);

        /* terminate all outstanding operations */
        mdsk_term_io(device);

        /* figure out blocksize of device */
        label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
        if (label == NULL)  {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "No memory to allocate initialization request");
                rc = -ENOMEM;
                goto out;
        }
        bio = kzalloc(sizeof(*bio), GFP_KERNEL);
        if (bio == NULL)  {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "No memory to allocate initialization bio");
                rc = -ENOMEM;
                goto out_label;
        }
        rc = 0;
        end_block = 0;
        /* try all sizes - needed for ECKD devices */
        for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
                mdsk_init_io(device, bsize, 0, &end_block);
                memset(bio, 0, sizeof(*bio));
                bio->type = MDSK_READ_REQ;
                bio->block_number = private->pt_block + 1;
                bio->buffer = label;
                memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
                private->iob.dev_nr = rdc_data->dev_nr;
                private->iob.key = 0;
                private->iob.flags = 0; /* do synchronous io */
                private->iob.block_count = 1;
                private->iob.interrupt_params = 0;
                private->iob.bio_list = bio;
                private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
                rc = dia250(&private->iob, RW_BIO);
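                /* rc 3 means the DIAG instruction raised an exception that
                 * was caught by the extable in __dia250(). */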
                if (rc == 3) {
                        pr_warn("%s: A 64-bit DIAG call failed\n",
                                dev_name(&device->cdev->dev));
                        rc = -EOPNOTSUPP;
                        goto out_bio;
                }
                mdsk_term_io(device);
                if (rc == 0)
                        break;
        }
        if (bsize > PAGE_SIZE) {
                pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
                        dev_name(&device->cdev->dev), rc);
                rc = -EIO;
                goto out_bio;
        }
        /* check for label block */
        if (memcmp(label->label_id, DASD_DIAG_CMS1,
                  sizeof(DASD_DIAG_CMS1)) == 0) {
                /* get formatted blocksize from label block */
                bsize = (unsigned int) label->block_size;
                block->blocks = (unsigned long) label->block_count;
        } else
                block->blocks = end_block;
        block->bp_block = bsize;
        block->s2b_shift = 0;   /* bits to shift 512 to get a block */
        for (sb = 512; sb < bsize; sb = sb << 1)
                block->s2b_shift++;
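        /* e.g. a 4096 byte block size gives s2b_shift == 3 (512 << 3 == 4096) */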
        rc = mdsk_init_io(device, block->bp_block, 0, NULL);
        if (rc && (rc != 4)) {
                pr_warn("%s: DIAG initialization failed with rc=%d\n",
                        dev_name(&device->cdev->dev), rc);
                rc = -EIO;
        } else {
                if (rc == 4)
                        set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
                pr_info("%s: New DASD with %ld byte/block, total size %ld "
                        "KB%s\n", dev_name(&device->cdev->dev),
                        (unsigned long) block->bp_block,
                        (unsigned long) (block->blocks <<
                                         block->s2b_shift) >> 1,
                        (rc == 4) ? ", read-only device" : "");
                rc = 0;
        }
out_bio:
        kfree(bio);
out_label:
        free_page((long) label);
out:
        if (rc) {
                device->block = NULL;
                dasd_free_block(block);
                device->private = NULL;
                kfree(private);
        }
        return rc;
}

/* Fill in virtual disk geometry for device. Return zero on success, non-zero
 * otherwise. */
static int
dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
        if (dasd_check_blocksize(block->bp_block) != 0)
                return -EINVAL;
        geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
        geo->heads = 16;
        geo->sectors = 128 >> block->s2b_shift;
        return 0;
}

static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req * cqr)
{
        return dasd_default_erp_action;
}

static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
{
        return dasd_default_erp_postaction;
}

/* Create DASD request from block device request. Return pointer to new
 * request on success, ERR_PTR otherwise. */
static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
                                               struct dasd_block *block,
                                               struct request *req)
{
        struct dasd_ccw_req *cqr;
        struct dasd_diag_req *dreq;
        struct dasd_diag_bio *dbio;
        struct req_iterator iter;
        struct bio_vec bv;
        char *dst;
        unsigned int count;
        sector_t recid, first_rec, last_rec;
        unsigned int blksize, off;
        unsigned char rw_cmd;

        if (rq_data_dir(req) == READ)
                rw_cmd = MDSK_READ_REQ;
        else if (rq_data_dir(req) == WRITE)
                rw_cmd = MDSK_WRITE_REQ;
        else
                return ERR_PTR(-EINVAL);
        blksize = block->bp_block;
        /* Calculate record id of first and last block. */
        first_rec = blk_rq_pos(req) >> block->s2b_shift;
        last_rec =
                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
        /* Check struct bio and count the number of blocks for the request. */
        count = 0;
        rq_for_each_segment(bv, req, iter) {
                if (bv.bv_len & (blksize - 1))
                        /* Fba can only do full blocks. */
                        return ERR_PTR(-EINVAL);
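                /* bv_len >> 9 is the segment size in 512-byte sectors;
                 * shifting by s2b_shift as well converts that to blocks. */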
                count += bv.bv_len >> (block->s2b_shift + 9);
        }
        /* Paranoia. */
        if (count != last_rec - first_rec + 1)
                return ERR_PTR(-EINVAL);
        /* Build the request */
        cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
                                   memdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;

        dreq = (struct dasd_diag_req *) cqr->data;
        dreq->block_count = count;
        dbio = dreq->bio;
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
                dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        memset(dbio, 0, sizeof (struct dasd_diag_bio));
                        dbio->type = rw_cmd;
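                        /* recid counts from zero, while the DIAG bio list
                         * evidently expects one-based block numbers; hence
                         * the + 1 below. */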
                        dbio->block_number = recid + 1;
                        dbio->buffer = dst;
                        dbio++;
                        dst += blksize;
                        recid++;
                }
        }
        cqr->retries = memdev->default_retries;
        cqr->buildclk = get_tod_clock();
        if (blk_noretry_request(req) ||
            block->base->features & DASD_FEATURE_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
        cqr->startdev = memdev;
        cqr->memdev = memdev;
        cqr->block = block;
        cqr->expires = memdev->default_expires * HZ;
        cqr->status = DASD_CQR_FILLED;
        return cqr;
}

/* Release DASD request. Return non-zero if request was successful, zero
 * otherwise. */
static int
dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
        int status;

        status = cqr->status == DASD_CQR_DONE;
        dasd_sfree_request(cqr, cqr->memdev);
        return status;
}

static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
{
        if (cqr->retries < 0)
                cqr->status = DASD_CQR_FAILED;
        else
                cqr->status = DASD_CQR_FILLED;
};

/* Fill in IOCTL data for device. */
static int
dasd_diag_fill_info(struct dasd_device * device,
                    struct dasd_information2_t * info)
{
        struct dasd_diag_private *private = device->private;

        info->label_block = (unsigned int) private->pt_block;
        info->FBA_layout = 1;
        info->format = DASD_FORMAT_LDL;
        info->characteristics_size = sizeof(private->rdc_data);
        memcpy(info->characteristics, &private->rdc_data,
               sizeof(private->rdc_data));
        info->confdata_size = 0;
        return 0;
}

static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
                     struct irb *stat)
{
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                    "dump sense not available for DIAG data");
}

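/* Largest supported request size, expressed in 512-byte sectors
 * (DIAG_MAX_BLOCKS converted to sectors via s2b_shift). */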
static unsigned int dasd_diag_max_sectors(struct dasd_block *block)
{
        return DIAG_MAX_BLOCKS << block->s2b_shift;
}

static int dasd_diag_pe_handler(struct dasd_device *device,
                                __u8 tbvpm, __u8 fcsecpm)
{
        return dasd_generic_verify_path(device, tbvpm);
}

static struct dasd_discipline dasd_diag_discipline = {
        .owner = THIS_MODULE,
        .name = "DIAG",
        .ebcname = "DIAG",
        .max_sectors = dasd_diag_max_sectors,
        .check_device = dasd_diag_check_device,
        .pe_handler = dasd_diag_pe_handler,
        .fill_geometry = dasd_diag_fill_geometry,
        .start_IO = dasd_start_diag,
        .term_IO = dasd_diag_term_IO,
        .handle_terminated_request = dasd_diag_handle_terminated_request,
        .erp_action = dasd_diag_erp_action,
        .erp_postaction = dasd_diag_erp_postaction,
        .build_cp = dasd_diag_build_cp,
        .free_cp = dasd_diag_free_cp,
        .dump_sense = dasd_diag_dump_sense,
        .fill_info = dasd_diag_fill_info,
};

static int __init
dasd_diag_init(void)
{
        if (!MACHINE_IS_VM) {
                pr_info("Discipline %s cannot be used without z/VM\n",
                        dasd_diag_discipline.name);
                return -ENODEV;
        }
        ASCEBC(dasd_diag_discipline.ebcname, 4);

        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
        dasd_diag_discipline_pointer = &dasd_diag_discipline;
        return 0;
}

static void __exit
dasd_diag_cleanup(void)
{
        unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
        irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
        dasd_diag_discipline_pointer = NULL;
}

module_init(dasd_diag_init);
module_exit(dasd_diag_cleanup);