x86/insn: Directly assign x86_64 state in insn_init()
[linux-2.6-block.git] / drivers / s390 / block / dasd_eckd.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *                  Horst Hummel <Horst.Hummel@de.ibm.com>
5  *                  Carsten Otte <Cotte@de.ibm.com>
6  *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12
13 #define KMSG_COMPONENT "dasd-eckd"
14
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>        /* HDIO_GETGEO                      */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/compat.h>
22 #include <linux/init.h>
23 #include <linux/seq_file.h>
24 #include <linux/uaccess.h>
25 #include <linux/io.h>
26
27 #include <asm/css_chars.h>
28 #include <asm/debug.h>
29 #include <asm/idals.h>
30 #include <asm/ebcdic.h>
31 #include <asm/cio.h>
32 #include <asm/ccwdev.h>
33 #include <asm/itcw.h>
34 #include <asm/schid.h>
35 #include <asm/chpid.h>
36
37 #include "dasd_int.h"
38 #include "dasd_eckd.h"
39
40 #ifdef PRINTK_HEADER
41 #undef PRINTK_HEADER
42 #endif                          /* PRINTK_HEADER */
43 #define PRINTK_HEADER "dasd(eckd):"
44
45 /*
46  * raw track access always map to 64k in memory
47  * so it maps to 16 blocks of 4k per track
48  */
49 #define DASD_RAW_BLOCK_PER_TRACK 16
50 #define DASD_RAW_BLOCKSIZE 4096
51 /* 64k are 128 x 512 byte sectors  */
52 #define DASD_RAW_SECTORS_PER_TRACK 128
53
54 MODULE_LICENSE("GPL");
55
56 static struct dasd_discipline dasd_eckd_discipline;
57
58 /* The ccw bus type uses this table to find devices that it sends to
59  * dasd_eckd_probe */
60 static struct ccw_device_id dasd_eckd_ids[] = {
61         { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
62         { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
63         { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
64         { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
65         { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
66         { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
67         { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
68         { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
69         { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
70         { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
71         { /* end of list */ },
72 };
73
74 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
75
76 static struct ccw_driver dasd_eckd_driver; /* see below */
77
78 static void *rawpadpage;
79
80 #define INIT_CQR_OK 0
81 #define INIT_CQR_UNFORMATTED 1
82 #define INIT_CQR_ERROR 2
83
84 /* emergency request for reserve/release */
85 static struct {
86         struct dasd_ccw_req cqr;
87         struct ccw1 ccw;
88         char data[32];
89 } *dasd_reserve_req;
90 static DEFINE_MUTEX(dasd_reserve_mutex);
91
92 static struct {
93         struct dasd_ccw_req cqr;
94         struct ccw1 ccw[2];
95         char data[40];
96 } *dasd_vol_info_req;
97 static DEFINE_MUTEX(dasd_vol_info_mutex);
98
99 struct ext_pool_exhaust_work_data {
100         struct work_struct worker;
101         struct dasd_device *device;
102         struct dasd_device *base;
103 };
104
105 /* definitions for the path verification worker */
106 struct pe_handler_work_data {
107         struct work_struct worker;
108         struct dasd_device *device;
109         struct dasd_ccw_req cqr;
110         struct ccw1 ccw;
111         __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
112         int isglobal;
113         __u8 tbvpm;
114         __u8 fcsecpm;
115 };
116 static struct pe_handler_work_data *pe_handler_worker;
117 static DEFINE_MUTEX(dasd_pe_handler_mutex);
118
119 struct check_attention_work_data {
120         struct work_struct worker;
121         struct dasd_device *device;
122         __u8 lpum;
123 };
124
125 static int dasd_eckd_ext_pool_id(struct dasd_device *);
126 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
127                         struct dasd_device *, struct dasd_device *,
128                         unsigned int, int, unsigned int, unsigned int,
129                         unsigned int, unsigned int);
130 static int dasd_eckd_query_pprc_status(struct dasd_device *,
131                                        struct dasd_pprc_data_sc4 *);
132
133 /* initial attempt at a probe function. this can be simplified once
134  * the other detection code is gone */
135 static int
136 dasd_eckd_probe (struct ccw_device *cdev)
137 {
138         int ret;
139
140         /* set ECKD specific ccw-device options */
141         ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
142                                      CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
143         if (ret) {
144                 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
145                                 "dasd_eckd_probe: could not set "
146                                 "ccw-device options");
147                 return ret;
148         }
149         ret = dasd_generic_probe(cdev);
150         return ret;
151 }
152
153 static int
154 dasd_eckd_set_online(struct ccw_device *cdev)
155 {
156         return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
157 }
158
159 static const int sizes_trk0[] = { 28, 148, 84 };
160 #define LABEL_SIZE 140
161
162 /* head and record addresses of count_area read in analysis ccw */
163 static const int count_area_head[] = { 0, 0, 0, 0, 1 };
164 static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
165
166 static inline unsigned int
167 ceil_quot(unsigned int d1, unsigned int d2)
168 {
169         return (d1 + (d2 - 1)) / d2;
170 }
171
172 static unsigned int
173 recs_per_track(struct dasd_eckd_characteristics * rdc,
174                unsigned int kl, unsigned int dl)
175 {
176         int dn, kn;
177
178         switch (rdc->dev_type) {
179         case 0x3380:
180                 if (kl)
181                         return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
182                                        ceil_quot(dl + 12, 32));
183                 else
184                         return 1499 / (15 + ceil_quot(dl + 12, 32));
185         case 0x3390:
186                 dn = ceil_quot(dl + 6, 232) + 1;
187                 if (kl) {
188                         kn = ceil_quot(kl + 6, 232) + 1;
189                         return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
190                                        9 + ceil_quot(dl + 6 * dn, 34));
191                 } else
192                         return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
193         case 0x9345:
194                 dn = ceil_quot(dl + 6, 232) + 1;
195                 if (kl) {
196                         kn = ceil_quot(kl + 6, 232) + 1;
197                         return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
198                                        ceil_quot(dl + 6 * dn, 34));
199                 } else
200                         return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
201         }
202         return 0;
203 }
204
205 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
206 {
207         geo->cyl = (__u16) cyl;
208         geo->head = cyl >> 16;
209         geo->head <<= 4;
210         geo->head |= head;
211 }
212
213 /*
214  * calculate failing track from sense data depending if
215  * it is an EAV device or not
216  */
217 static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
218                                     sector_t *track)
219 {
220         struct dasd_eckd_private *private = device->private;
221         u8 *sense = NULL;
222         u32 cyl;
223         u8 head;
224
225         sense = dasd_get_sense(irb);
226         if (!sense) {
227                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
228                               "ESE error no sense data\n");
229                 return -EINVAL;
230         }
231         if (!(sense[27] & DASD_SENSE_BIT_2)) {
232                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
233                               "ESE error no valid track data\n");
234                 return -EINVAL;
235         }
236
237         if (sense[27] & DASD_SENSE_BIT_3) {
238                 /* enhanced addressing */
239                 cyl = sense[30] << 20;
240                 cyl |= (sense[31] & 0xF0) << 12;
241                 cyl |= sense[28] << 8;
242                 cyl |= sense[29];
243         } else {
244                 cyl = sense[29] << 8;
245                 cyl |= sense[30];
246         }
247         head = sense[31] & 0x0F;
248         *track = cyl * private->rdc_data.trk_per_cyl + head;
249         return 0;
250 }
251
252 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
253                      struct dasd_device *device)
254 {
255         struct dasd_eckd_private *private = device->private;
256         int rc;
257
258         rc = get_phys_clock(&data->ep_sys_time);
259         /*
260          * Ignore return code if XRC is not supported or
261          * sync clock is switched off
262          */
263         if ((rc && !private->rdc_data.facilities.XRC_supported) ||
264             rc == -EOPNOTSUPP || rc == -EACCES)
265                 return 0;
266
267         /* switch on System Time Stamp - needed for XRC Support */
268         data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
269         data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
270
271         if (ccw) {
272                 ccw->count = sizeof(struct DE_eckd_data);
273                 ccw->flags |= CCW_FLAG_SLI;
274         }
275
276         return rc;
277 }
278
279 static int
280 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
281               unsigned int totrk, int cmd, struct dasd_device *device,
282               int blksize)
283 {
284         struct dasd_eckd_private *private = device->private;
285         u16 heads, beghead, endhead;
286         u32 begcyl, endcyl;
287         int rc = 0;
288
289         if (ccw) {
290                 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
291                 ccw->flags = 0;
292                 ccw->count = 16;
293                 ccw->cda = (__u32)virt_to_phys(data);
294         }
295
296         memset(data, 0, sizeof(struct DE_eckd_data));
297         switch (cmd) {
298         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
299         case DASD_ECKD_CCW_READ_RECORD_ZERO:
300         case DASD_ECKD_CCW_READ:
301         case DASD_ECKD_CCW_READ_MT:
302         case DASD_ECKD_CCW_READ_CKD:
303         case DASD_ECKD_CCW_READ_CKD_MT:
304         case DASD_ECKD_CCW_READ_KD:
305         case DASD_ECKD_CCW_READ_KD_MT:
306                 data->mask.perm = 0x1;
307                 data->attributes.operation = private->attrib.operation;
308                 break;
309         case DASD_ECKD_CCW_READ_COUNT:
310                 data->mask.perm = 0x1;
311                 data->attributes.operation = DASD_BYPASS_CACHE;
312                 break;
313         case DASD_ECKD_CCW_READ_TRACK:
314         case DASD_ECKD_CCW_READ_TRACK_DATA:
315                 data->mask.perm = 0x1;
316                 data->attributes.operation = private->attrib.operation;
317                 data->blk_size = 0;
318                 break;
319         case DASD_ECKD_CCW_WRITE:
320         case DASD_ECKD_CCW_WRITE_MT:
321         case DASD_ECKD_CCW_WRITE_KD:
322         case DASD_ECKD_CCW_WRITE_KD_MT:
323                 data->mask.perm = 0x02;
324                 data->attributes.operation = private->attrib.operation;
325                 rc = set_timestamp(ccw, data, device);
326                 break;
327         case DASD_ECKD_CCW_WRITE_CKD:
328         case DASD_ECKD_CCW_WRITE_CKD_MT:
329                 data->attributes.operation = DASD_BYPASS_CACHE;
330                 rc = set_timestamp(ccw, data, device);
331                 break;
332         case DASD_ECKD_CCW_ERASE:
333         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
334         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
335                 data->mask.perm = 0x3;
336                 data->mask.auth = 0x1;
337                 data->attributes.operation = DASD_BYPASS_CACHE;
338                 rc = set_timestamp(ccw, data, device);
339                 break;
340         case DASD_ECKD_CCW_WRITE_FULL_TRACK:
341                 data->mask.perm = 0x03;
342                 data->attributes.operation = private->attrib.operation;
343                 data->blk_size = 0;
344                 break;
345         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
346                 data->mask.perm = 0x02;
347                 data->attributes.operation = private->attrib.operation;
348                 data->blk_size = blksize;
349                 rc = set_timestamp(ccw, data, device);
350                 break;
351         default:
352                 dev_err(&device->cdev->dev,
353                         "0x%x is not a known command\n", cmd);
354                 break;
355         }
356
357         data->attributes.mode = 0x3;    /* ECKD */
358
359         if ((private->rdc_data.cu_type == 0x2105 ||
360              private->rdc_data.cu_type == 0x2107 ||
361              private->rdc_data.cu_type == 0x1750)
362             && !(private->uses_cdl && trk < 2))
363                 data->ga_extended |= 0x40; /* Regular Data Format Mode */
364
365         heads = private->rdc_data.trk_per_cyl;
366         begcyl = trk / heads;
367         beghead = trk % heads;
368         endcyl = totrk / heads;
369         endhead = totrk % heads;
370
371         /* check for sequential prestage - enhance cylinder range */
372         if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
373             data->attributes.operation == DASD_SEQ_ACCESS) {
374
375                 if (endcyl + private->attrib.nr_cyl < private->real_cyl)
376                         endcyl += private->attrib.nr_cyl;
377                 else
378                         endcyl = (private->real_cyl - 1);
379         }
380
381         set_ch_t(&data->beg_ext, begcyl, beghead);
382         set_ch_t(&data->end_ext, endcyl, endhead);
383         return rc;
384 }
385
386
387 static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
388                               unsigned int trk, unsigned int rec_on_trk,
389                               int count, int cmd, struct dasd_device *device,
390                               unsigned int reclen, unsigned int tlf)
391 {
392         struct dasd_eckd_private *private = device->private;
393         int sector;
394         int dn, d;
395
396         if (ccw) {
397                 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
398                 ccw->flags = 0;
399                 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
400                         ccw->count = 22;
401                 else
402                         ccw->count = 20;
403                 ccw->cda = (__u32)virt_to_phys(data);
404         }
405
406         memset(data, 0, sizeof(*data));
407         sector = 0;
408         if (rec_on_trk) {
409                 switch (private->rdc_data.dev_type) {
410                 case 0x3390:
411                         dn = ceil_quot(reclen + 6, 232);
412                         d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
413                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
414                         break;
415                 case 0x3380:
416                         d = 7 + ceil_quot(reclen + 12, 32);
417                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
418                         break;
419                 }
420         }
421         data->sector = sector;
422         /* note: meaning of count depends on the operation
423          *       for record based I/O it's the number of records, but for
424          *       track based I/O it's the number of tracks
425          */
426         data->count = count;
427         switch (cmd) {
428         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
429                 data->operation.orientation = 0x3;
430                 data->operation.operation = 0x03;
431                 break;
432         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
433                 data->operation.orientation = 0x3;
434                 data->operation.operation = 0x16;
435                 break;
436         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
437                 data->operation.orientation = 0x1;
438                 data->operation.operation = 0x03;
439                 data->count++;
440                 break;
441         case DASD_ECKD_CCW_READ_RECORD_ZERO:
442                 data->operation.orientation = 0x3;
443                 data->operation.operation = 0x16;
444                 data->count++;
445                 break;
446         case DASD_ECKD_CCW_WRITE:
447         case DASD_ECKD_CCW_WRITE_MT:
448         case DASD_ECKD_CCW_WRITE_KD:
449         case DASD_ECKD_CCW_WRITE_KD_MT:
450                 data->auxiliary.length_valid = 0x1;
451                 data->length = reclen;
452                 data->operation.operation = 0x01;
453                 break;
454         case DASD_ECKD_CCW_WRITE_CKD:
455         case DASD_ECKD_CCW_WRITE_CKD_MT:
456                 data->auxiliary.length_valid = 0x1;
457                 data->length = reclen;
458                 data->operation.operation = 0x03;
459                 break;
460         case DASD_ECKD_CCW_WRITE_FULL_TRACK:
461                 data->operation.orientation = 0x0;
462                 data->operation.operation = 0x3F;
463                 data->extended_operation = 0x11;
464                 data->length = 0;
465                 data->extended_parameter_length = 0x02;
466                 if (data->count > 8) {
467                         data->extended_parameter[0] = 0xFF;
468                         data->extended_parameter[1] = 0xFF;
469                         data->extended_parameter[1] <<= (16 - count);
470                 } else {
471                         data->extended_parameter[0] = 0xFF;
472                         data->extended_parameter[0] <<= (8 - count);
473                         data->extended_parameter[1] = 0x00;
474                 }
475                 data->sector = 0xFF;
476                 break;
477         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
478                 data->auxiliary.length_valid = 0x1;
479                 data->length = reclen;  /* not tlf, as one might think */
480                 data->operation.operation = 0x3F;
481                 data->extended_operation = 0x23;
482                 break;
483         case DASD_ECKD_CCW_READ:
484         case DASD_ECKD_CCW_READ_MT:
485         case DASD_ECKD_CCW_READ_KD:
486         case DASD_ECKD_CCW_READ_KD_MT:
487                 data->auxiliary.length_valid = 0x1;
488                 data->length = reclen;
489                 data->operation.operation = 0x06;
490                 break;
491         case DASD_ECKD_CCW_READ_CKD:
492         case DASD_ECKD_CCW_READ_CKD_MT:
493                 data->auxiliary.length_valid = 0x1;
494                 data->length = reclen;
495                 data->operation.operation = 0x16;
496                 break;
497         case DASD_ECKD_CCW_READ_COUNT:
498                 data->operation.operation = 0x06;
499                 break;
500         case DASD_ECKD_CCW_READ_TRACK:
501                 data->operation.orientation = 0x1;
502                 data->operation.operation = 0x0C;
503                 data->extended_parameter_length = 0;
504                 data->sector = 0xFF;
505                 break;
506         case DASD_ECKD_CCW_READ_TRACK_DATA:
507                 data->auxiliary.length_valid = 0x1;
508                 data->length = tlf;
509                 data->operation.operation = 0x0C;
510                 break;
511         case DASD_ECKD_CCW_ERASE:
512                 data->length = reclen;
513                 data->auxiliary.length_valid = 0x1;
514                 data->operation.operation = 0x0b;
515                 break;
516         default:
517                 DBF_DEV_EVENT(DBF_ERR, device,
518                             "fill LRE unknown opcode 0x%x", cmd);
519                 BUG();
520         }
521         set_ch_t(&data->seek_addr,
522                  trk / private->rdc_data.trk_per_cyl,
523                  trk % private->rdc_data.trk_per_cyl);
524         data->search_arg.cyl = data->seek_addr.cyl;
525         data->search_arg.head = data->seek_addr.head;
526         data->search_arg.record = rec_on_trk;
527 }
528
529 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
530                       unsigned int trk, unsigned int totrk, int cmd,
531                       struct dasd_device *basedev, struct dasd_device *startdev,
532                       unsigned int format, unsigned int rec_on_trk, int count,
533                       unsigned int blksize, unsigned int tlf)
534 {
535         struct dasd_eckd_private *basepriv, *startpriv;
536         struct LRE_eckd_data *lredata;
537         struct DE_eckd_data *dedata;
538         int rc = 0;
539
540         basepriv = basedev->private;
541         startpriv = startdev->private;
542         dedata = &pfxdata->define_extent;
543         lredata = &pfxdata->locate_record;
544
545         ccw->cmd_code = DASD_ECKD_CCW_PFX;
546         ccw->flags = 0;
547         if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
548                 ccw->count = sizeof(*pfxdata) + 2;
549                 ccw->cda = (__u32)virt_to_phys(pfxdata);
550                 memset(pfxdata, 0, sizeof(*pfxdata) + 2);
551         } else {
552                 ccw->count = sizeof(*pfxdata);
553                 ccw->cda = (__u32)virt_to_phys(pfxdata);
554                 memset(pfxdata, 0, sizeof(*pfxdata));
555         }
556
557         /* prefix data */
558         if (format > 1) {
559                 DBF_DEV_EVENT(DBF_ERR, basedev,
560                               "PFX LRE unknown format 0x%x", format);
561                 BUG();
562                 return -EINVAL;
563         }
564         pfxdata->format = format;
565         pfxdata->base_address = basepriv->conf.ned->unit_addr;
566         pfxdata->base_lss = basepriv->conf.ned->ID;
567         pfxdata->validity.define_extent = 1;
568
569         /* private uid is kept up to date, conf_data may be outdated */
570         if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
571                 pfxdata->validity.verify_base = 1;
572
573         if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
574                 pfxdata->validity.verify_base = 1;
575                 pfxdata->validity.hyper_pav = 1;
576         }
577
578         rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
579
580         /*
581          * For some commands the System Time Stamp is set in the define extent
582          * data when XRC is supported. The validity of the time stamp must be
583          * reflected in the prefix data as well.
584          */
585         if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
586                 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */
587
588         if (format == 1) {
589                 locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
590                                   basedev, blksize, tlf);
591         }
592
593         return rc;
594 }
595
596 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
597                   unsigned int trk, unsigned int totrk, int cmd,
598                   struct dasd_device *basedev, struct dasd_device *startdev)
599 {
600         return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
601                           0, 0, 0, 0, 0);
602 }
603
604 static void
605 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
606               unsigned int rec_on_trk, int no_rec, int cmd,
607               struct dasd_device * device, int reclen)
608 {
609         struct dasd_eckd_private *private = device->private;
610         int sector;
611         int dn, d;
612
613         DBF_DEV_EVENT(DBF_INFO, device,
614                   "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
615                   trk, rec_on_trk, no_rec, cmd, reclen);
616
617         ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
618         ccw->flags = 0;
619         ccw->count = 16;
620         ccw->cda = (__u32)virt_to_phys(data);
621
622         memset(data, 0, sizeof(struct LO_eckd_data));
623         sector = 0;
624         if (rec_on_trk) {
625                 switch (private->rdc_data.dev_type) {
626                 case 0x3390:
627                         dn = ceil_quot(reclen + 6, 232);
628                         d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
629                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
630                         break;
631                 case 0x3380:
632                         d = 7 + ceil_quot(reclen + 12, 32);
633                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
634                         break;
635                 }
636         }
637         data->sector = sector;
638         data->count = no_rec;
639         switch (cmd) {
640         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
641                 data->operation.orientation = 0x3;
642                 data->operation.operation = 0x03;
643                 break;
644         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
645                 data->operation.orientation = 0x3;
646                 data->operation.operation = 0x16;
647                 break;
648         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
649                 data->operation.orientation = 0x1;
650                 data->operation.operation = 0x03;
651                 data->count++;
652                 break;
653         case DASD_ECKD_CCW_READ_RECORD_ZERO:
654                 data->operation.orientation = 0x3;
655                 data->operation.operation = 0x16;
656                 data->count++;
657                 break;
658         case DASD_ECKD_CCW_WRITE:
659         case DASD_ECKD_CCW_WRITE_MT:
660         case DASD_ECKD_CCW_WRITE_KD:
661         case DASD_ECKD_CCW_WRITE_KD_MT:
662                 data->auxiliary.last_bytes_used = 0x1;
663                 data->length = reclen;
664                 data->operation.operation = 0x01;
665                 break;
666         case DASD_ECKD_CCW_WRITE_CKD:
667         case DASD_ECKD_CCW_WRITE_CKD_MT:
668                 data->auxiliary.last_bytes_used = 0x1;
669                 data->length = reclen;
670                 data->operation.operation = 0x03;
671                 break;
672         case DASD_ECKD_CCW_READ:
673         case DASD_ECKD_CCW_READ_MT:
674         case DASD_ECKD_CCW_READ_KD:
675         case DASD_ECKD_CCW_READ_KD_MT:
676                 data->auxiliary.last_bytes_used = 0x1;
677                 data->length = reclen;
678                 data->operation.operation = 0x06;
679                 break;
680         case DASD_ECKD_CCW_READ_CKD:
681         case DASD_ECKD_CCW_READ_CKD_MT:
682                 data->auxiliary.last_bytes_used = 0x1;
683                 data->length = reclen;
684                 data->operation.operation = 0x16;
685                 break;
686         case DASD_ECKD_CCW_READ_COUNT:
687                 data->operation.operation = 0x06;
688                 break;
689         case DASD_ECKD_CCW_ERASE:
690                 data->length = reclen;
691                 data->auxiliary.last_bytes_used = 0x1;
692                 data->operation.operation = 0x0b;
693                 break;
694         default:
695                 DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
696                               "opcode 0x%x", cmd);
697         }
698         set_ch_t(&data->seek_addr,
699                  trk / private->rdc_data.trk_per_cyl,
700                  trk % private->rdc_data.trk_per_cyl);
701         data->search_arg.cyl = data->seek_addr.cyl;
702         data->search_arg.head = data->seek_addr.head;
703         data->search_arg.record = rec_on_trk;
704 }
705
706 /*
707  * Returns 1 if the block is one of the special blocks that needs
708  * to get read/written with the KD variant of the command.
709  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
710  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
711  * Luckily the KD variants differ only by one bit (0x08) from the
712  * normal variant. So don't wonder about code like:
713  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
714  *         ccw->cmd_code |= 0x8;
715  */
716 static inline int
717 dasd_eckd_cdl_special(int blk_per_trk, int recid)
718 {
719         if (recid < 3)
720                 return 1;
721         if (recid < blk_per_trk)
722                 return 0;
723         if (recid < 2 * blk_per_trk)
724                 return 1;
725         return 0;
726 }
727
728 /*
729  * Returns the record size for the special blocks of the cdl format.
730  * Only returns something useful if dasd_eckd_cdl_special is true
731  * for the recid.
732  */
733 static inline int
734 dasd_eckd_cdl_reclen(int recid)
735 {
736         if (recid < 3)
737                 return sizes_trk0[recid];
738         return LABEL_SIZE;
739 }
740 /* create unique id from private structure. */
741 static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
742 {
743         int count;
744
745         memset(uid, 0, sizeof(struct dasd_uid));
746         memcpy(uid->vendor, conf->ned->HDA_manufacturer,
747                sizeof(uid->vendor) - 1);
748         EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
749         memcpy(uid->serial, &conf->ned->serial,
750                sizeof(uid->serial) - 1);
751         EBCASC(uid->serial, sizeof(uid->serial) - 1);
752         uid->ssid = conf->gneq->subsystemID;
753         uid->real_unit_addr = conf->ned->unit_addr;
754         if (conf->sneq) {
755                 uid->type = conf->sneq->sua_flags;
756                 if (uid->type == UA_BASE_PAV_ALIAS)
757                         uid->base_unit_addr = conf->sneq->base_unit_addr;
758         } else {
759                 uid->type = UA_BASE_DEVICE;
760         }
761         if (conf->vdsneq) {
762                 for (count = 0; count < 16; count++) {
763                         sprintf(uid->vduit+2*count, "%02x",
764                                 conf->vdsneq->uit[count]);
765                 }
766         }
767 }
768
769 /*
770  * Generate device unique id that specifies the physical device.
771  */
772 static int dasd_eckd_generate_uid(struct dasd_device *device)
773 {
774         struct dasd_eckd_private *private = device->private;
775         unsigned long flags;
776
777         if (!private)
778                 return -ENODEV;
779         if (!private->conf.ned || !private->conf.gneq)
780                 return -ENODEV;
781         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
782         create_uid(&private->conf, &private->uid);
783         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
784         return 0;
785 }
786
787 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
788 {
789         struct dasd_eckd_private *private = device->private;
790         unsigned long flags;
791
792         if (private) {
793                 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
794                 *uid = private->uid;
795                 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
796                 return 0;
797         }
798         return -EINVAL;
799 }
800
801 /*
802  * compare device UID with data of a given dasd_eckd_private structure
803  * return 0 for match
804  */
805 static int dasd_eckd_compare_path_uid(struct dasd_device *device,
806                                       struct dasd_conf *path_conf)
807 {
808         struct dasd_uid device_uid;
809         struct dasd_uid path_uid;
810
811         create_uid(path_conf, &path_uid);
812         dasd_eckd_get_uid(device, &device_uid);
813
814         return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
815 }
816
817 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
818                                    struct dasd_ccw_req *cqr,
819                                    __u8 *rcd_buffer,
820                                    __u8 lpm)
821 {
822         struct ccw1 *ccw;
823         /*
824          * buffer has to start with EBCDIC "V1.0" to show
825          * support for virtual device SNEQ
826          */
827         rcd_buffer[0] = 0xE5;
828         rcd_buffer[1] = 0xF1;
829         rcd_buffer[2] = 0x4B;
830         rcd_buffer[3] = 0xF0;
831
832         ccw = cqr->cpaddr;
833         ccw->cmd_code = DASD_ECKD_CCW_RCD;
834         ccw->flags = 0;
835         ccw->cda = (__u32)virt_to_phys(rcd_buffer);
836         ccw->count = DASD_ECKD_RCD_DATA_SIZE;
837         cqr->magic = DASD_ECKD_MAGIC;
838
839         cqr->startdev = device;
840         cqr->memdev = device;
841         cqr->block = NULL;
842         cqr->expires = 10*HZ;
843         cqr->lpm = lpm;
844         cqr->retries = 256;
845         cqr->buildclk = get_tod_clock();
846         cqr->status = DASD_CQR_FILLED;
847         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
848 }
849
850 /*
851  * Wakeup helper for read_conf
852  * if the cqr is not done and needs some error recovery
853  * the buffer has to be re-initialized with the EBCDIC "V1.0"
854  * to show support for virtual device SNEQ
855  */
856 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
857 {
858         struct ccw1 *ccw;
859         __u8 *rcd_buffer;
860
861         if (cqr->status !=  DASD_CQR_DONE) {
862                 ccw = cqr->cpaddr;
863                 rcd_buffer = phys_to_virt(ccw->cda);
864                 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
865
866                 rcd_buffer[0] = 0xE5;
867                 rcd_buffer[1] = 0xF1;
868                 rcd_buffer[2] = 0x4B;
869                 rcd_buffer[3] = 0xF0;
870         }
871         dasd_wakeup_cb(cqr, data);
872 }
873
874 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
875                                            struct dasd_ccw_req *cqr,
876                                            __u8 *rcd_buffer,
877                                            __u8 lpm)
878 {
879         struct ciw *ciw;
880         int rc;
881         /*
882          * sanity check: scan for RCD command in extended SenseID data
883          * some devices do not support RCD
884          */
885         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
886         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
887                 return -EOPNOTSUPP;
888
889         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
890         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
891         set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
892         cqr->retries = 5;
893         cqr->callback = read_conf_cb;
894         rc = dasd_sleep_on_immediatly(cqr);
895         return rc;
896 }
897
898 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
899                                    void **rcd_buffer,
900                                    int *rcd_buffer_size, __u8 lpm)
901 {
902         struct ciw *ciw;
903         char *rcd_buf = NULL;
904         int ret;
905         struct dasd_ccw_req *cqr;
906
907         /*
908          * sanity check: scan for RCD command in extended SenseID data
909          * some devices do not support RCD
910          */
911         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
912         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
913                 ret = -EOPNOTSUPP;
914                 goto out_error;
915         }
916         rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
917         if (!rcd_buf) {
918                 ret = -ENOMEM;
919                 goto out_error;
920         }
921         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
922                                    0, /* use rcd_buf as data ara */
923                                    device, NULL);
924         if (IS_ERR(cqr)) {
925                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
926                               "Could not allocate RCD request");
927                 ret = -ENOMEM;
928                 goto out_error;
929         }
930         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
931         cqr->callback = read_conf_cb;
932         ret = dasd_sleep_on(cqr);
933         /*
934          * on success we update the user input parms
935          */
936         dasd_sfree_request(cqr, cqr->memdev);
937         if (ret)
938                 goto out_error;
939
940         *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
941         *rcd_buffer = rcd_buf;
942         return 0;
943 out_error:
944         kfree(rcd_buf);
945         *rcd_buffer = NULL;
946         *rcd_buffer_size = 0;
947         return ret;
948 }
949
950 static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
951 {
952
953         struct dasd_sneq *sneq;
954         int i, count;
955
956         conf->ned = NULL;
957         conf->sneq = NULL;
958         conf->vdsneq = NULL;
959         conf->gneq = NULL;
960         count = conf->len / sizeof(struct dasd_sneq);
961         sneq = (struct dasd_sneq *)conf->data;
962         for (i = 0; i < count; ++i) {
963                 if (sneq->flags.identifier == 1 && sneq->format == 1)
964                         conf->sneq = sneq;
965                 else if (sneq->flags.identifier == 1 && sneq->format == 4)
966                         conf->vdsneq = (struct vd_sneq *)sneq;
967                 else if (sneq->flags.identifier == 2)
968                         conf->gneq = (struct dasd_gneq *)sneq;
969                 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
970                         conf->ned = (struct dasd_ned *)sneq;
971                 sneq++;
972         }
973         if (!conf->ned || !conf->gneq) {
974                 conf->ned = NULL;
975                 conf->sneq = NULL;
976                 conf->vdsneq = NULL;
977                 conf->gneq = NULL;
978                 return -EINVAL;
979         }
980         return 0;
981
982 };
983
984 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
985 {
986         struct dasd_gneq *gneq;
987         int i, count, found;
988
989         count = conf_len / sizeof(*gneq);
990         gneq = (struct dasd_gneq *)conf_data;
991         found = 0;
992         for (i = 0; i < count; ++i) {
993                 if (gneq->flags.identifier == 2) {
994                         found = 1;
995                         break;
996                 }
997                 gneq++;
998         }
999         if (found)
1000                 return ((char *)gneq)[18] & 0x07;
1001         else
1002                 return 0;
1003 }
1004
1005 static void dasd_eckd_store_conf_data(struct dasd_device *device,
1006                                       struct dasd_conf_data *conf_data, int chp)
1007 {
1008         struct dasd_eckd_private *private = device->private;
1009         struct channel_path_desc_fmt0 *chp_desc;
1010         struct subchannel_id sch_id;
1011         void *cdp;
1012
1013         /*
1014          * path handling and read_conf allocate data
1015          * free it before replacing the pointer
1016          * also replace the old private->conf_data pointer
1017          * with the new one if this points to the same data
1018          */
1019         cdp = device->path[chp].conf_data;
1020         if (private->conf.data == cdp) {
1021                 private->conf.data = (void *)conf_data;
1022                 dasd_eckd_identify_conf_parts(&private->conf);
1023         }
1024         ccw_device_get_schid(device->cdev, &sch_id);
1025         device->path[chp].conf_data = conf_data;
1026         device->path[chp].cssid = sch_id.cssid;
1027         device->path[chp].ssid = sch_id.ssid;
1028         chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
1029         if (chp_desc)
1030                 device->path[chp].chpid = chp_desc->chpid;
1031         kfree(chp_desc);
1032         kfree(cdp);
1033 }
1034
1035 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
1036 {
1037         struct dasd_eckd_private *private = device->private;
1038         int i;
1039
1040         private->conf.data = NULL;
1041         private->conf.len = 0;
1042         for (i = 0; i < 8; i++) {
1043                 kfree(device->path[i].conf_data);
1044                 device->path[i].conf_data = NULL;
1045                 device->path[i].cssid = 0;
1046                 device->path[i].ssid = 0;
1047                 device->path[i].chpid = 0;
1048                 dasd_path_notoper(device, i);
1049         }
1050 }
1051
1052 static void dasd_eckd_read_fc_security(struct dasd_device *device)
1053 {
1054         struct dasd_eckd_private *private = device->private;
1055         u8 esm_valid;
1056         u8 esm[8];
1057         int chp;
1058         int rc;
1059
1060         rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
1061         if (rc) {
1062                 for (chp = 0; chp < 8; chp++)
1063                         device->path[chp].fc_security = 0;
1064                 return;
1065         }
1066
1067         for (chp = 0; chp < 8; chp++) {
1068                 if (esm_valid & (0x80 >> chp))
1069                         device->path[chp].fc_security = esm[chp];
1070                 else
1071                         device->path[chp].fc_security = 0;
1072         }
1073 }
1074
1075 static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
1076                                      char *print_uid)
1077 {
1078         struct dasd_uid uid;
1079
1080         create_uid(conf, &uid);
1081         if (strlen(uid.vduit) > 0)
1082                 snprintf(print_uid, DASD_UID_STRLEN,
1083                          "%s.%s.%04x.%02x.%s",
1084                          uid.vendor, uid.serial, uid.ssid,
1085                          uid.real_unit_addr, uid.vduit);
1086         else
1087                 snprintf(print_uid, DASD_UID_STRLEN,
1088                          "%s.%s.%04x.%02x",
1089                          uid.vendor, uid.serial, uid.ssid,
1090                          uid.real_unit_addr);
1091 }
1092
1093 static int dasd_eckd_check_cabling(struct dasd_device *device,
1094                                    void *conf_data, __u8 lpm)
1095 {
1096         char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
1097         struct dasd_eckd_private *private = device->private;
1098         struct dasd_conf path_conf;
1099
1100         path_conf.data = conf_data;
1101         path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
1102         if (dasd_eckd_identify_conf_parts(&path_conf))
1103                 return 1;
1104
1105         if (dasd_eckd_compare_path_uid(device, &path_conf)) {
1106                 dasd_eckd_get_uid_string(&path_conf, print_path_uid);
1107                 dasd_eckd_get_uid_string(&private->conf, print_device_uid);
1108                 dev_err(&device->cdev->dev,
1109                         "Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
1110                         lpm, print_path_uid, print_device_uid);
1111                 return 1;
1112         }
1113
1114         return 0;
1115 }
1116
1117 static int dasd_eckd_read_conf(struct dasd_device *device)
1118 {
1119         void *conf_data;
1120         int conf_len, conf_data_saved;
1121         int rc, path_err, pos;
1122         __u8 lpm, opm;
1123         struct dasd_eckd_private *private;
1124
1125         private = device->private;
1126         opm = ccw_device_get_path_mask(device->cdev);
1127         conf_data_saved = 0;
1128         path_err = 0;
1129         /* get configuration data per operational path */
1130         for (lpm = 0x80; lpm; lpm>>= 1) {
1131                 if (!(lpm & opm))
1132                         continue;
1133                 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
1134                                              &conf_len, lpm);
1135                 if (rc && rc != -EOPNOTSUPP) {  /* -EOPNOTSUPP is ok */
1136                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1137                                         "Read configuration data returned "
1138                                         "error %d", rc);
1139                         return rc;
1140                 }
1141                 if (conf_data == NULL) {
1142                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1143                                         "No configuration data "
1144                                         "retrieved");
1145                         /* no further analysis possible */
1146                         dasd_path_add_opm(device, opm);
1147                         continue;       /* no error */
1148                 }
1149                 /* save first valid configuration data */
1150                 if (!conf_data_saved) {
1151                         /* initially clear previously stored conf_data */
1152                         dasd_eckd_clear_conf_data(device);
1153                         private->conf.data = conf_data;
1154                         private->conf.len = conf_len;
1155                         if (dasd_eckd_identify_conf_parts(&private->conf)) {
1156                                 private->conf.data = NULL;
1157                                 private->conf.len = 0;
1158                                 kfree(conf_data);
1159                                 continue;
1160                         }
1161                         /*
1162                          * build device UID that other path data
1163                          * can be compared to it
1164                          */
1165                         dasd_eckd_generate_uid(device);
1166                         conf_data_saved++;
1167                 } else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
1168                         dasd_path_add_cablepm(device, lpm);
1169                         path_err = -EINVAL;
1170                         kfree(conf_data);
1171                         continue;
1172                 }
1173
1174                 pos = pathmask_to_pos(lpm);
1175                 dasd_eckd_store_conf_data(device, conf_data, pos);
1176
1177                 switch (dasd_eckd_path_access(conf_data, conf_len)) {
1178                 case 0x02:
1179                         dasd_path_add_nppm(device, lpm);
1180                         break;
1181                 case 0x03:
1182                         dasd_path_add_ppm(device, lpm);
1183                         break;
1184                 }
1185                 if (!dasd_path_get_opm(device)) {
1186                         dasd_path_set_opm(device, lpm);
1187                         dasd_generic_path_operational(device);
1188                 } else {
1189                         dasd_path_add_opm(device, lpm);
1190                 }
1191         }
1192
1193         return path_err;
1194 }
1195
1196 static u32 get_fcx_max_data(struct dasd_device *device)
1197 {
1198         struct dasd_eckd_private *private = device->private;
1199         int fcx_in_css, fcx_in_gneq, fcx_in_features;
1200         unsigned int mdc;
1201         int tpm;
1202
1203         if (dasd_nofcx)
1204                 return 0;
1205         /* is transport mode supported? */
1206         fcx_in_css = css_general_characteristics.fcx;
1207         fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
1208         fcx_in_features = private->features.feature[40] & 0x80;
1209         tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1210
1211         if (!tpm)
1212                 return 0;
1213
1214         mdc = ccw_device_get_mdc(device->cdev, 0);
1215         if (mdc == 0) {
1216                 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1217                 return 0;
1218         } else {
1219                 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1220         }
1221 }
1222
1223 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1224 {
1225         struct dasd_eckd_private *private = device->private;
1226         unsigned int mdc;
1227         u32 fcx_max_data;
1228
1229         if (private->fcx_max_data) {
1230                 mdc = ccw_device_get_mdc(device->cdev, lpm);
1231                 if (mdc == 0) {
1232                         dev_warn(&device->cdev->dev,
1233                                  "Detecting the maximum data size for zHPF "
1234                                  "requests failed (rc=%d) for a new path %x\n",
1235                                  mdc, lpm);
1236                         return mdc;
1237                 }
1238                 fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1239                 if (fcx_max_data < private->fcx_max_data) {
1240                         dev_warn(&device->cdev->dev,
1241                                  "The maximum data size for zHPF requests %u "
1242                                  "on a new path %x is below the active maximum "
1243                                  "%u\n", fcx_max_data, lpm,
1244                                  private->fcx_max_data);
1245                         return -EACCES;
1246                 }
1247         }
1248         return 0;
1249 }
1250
1251 static int rebuild_device_uid(struct dasd_device *device,
1252                               struct pe_handler_work_data *data)
1253 {
1254         struct dasd_eckd_private *private = device->private;
1255         __u8 lpm, opm = dasd_path_get_opm(device);
1256         int rc = -ENODEV;
1257
1258         for (lpm = 0x80; lpm; lpm >>= 1) {
1259                 if (!(lpm & opm))
1260                         continue;
1261                 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1262                 memset(&data->cqr, 0, sizeof(data->cqr));
1263                 data->cqr.cpaddr = &data->ccw;
1264                 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1265                                                      data->rcd_buffer,
1266                                                      lpm);
1267
1268                 if (rc) {
1269                         if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
1270                                 continue;
1271                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1272                                         "Read configuration data "
1273                                         "returned error %d", rc);
1274                         break;
1275                 }
1276                 memcpy(private->conf.data, data->rcd_buffer,
1277                        DASD_ECKD_RCD_DATA_SIZE);
1278                 if (dasd_eckd_identify_conf_parts(&private->conf)) {
1279                         rc = -ENODEV;
1280                 } else /* first valid path is enough */
1281                         break;
1282         }
1283
1284         if (!rc)
1285                 rc = dasd_eckd_generate_uid(device);
1286
1287         return rc;
1288 }
1289
1290 static void dasd_eckd_path_available_action(struct dasd_device *device,
1291                                             struct pe_handler_work_data *data)
1292 {
1293         __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1294         __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
1295         struct dasd_conf_data *conf_data;
1296         char print_uid[DASD_UID_STRLEN];
1297         struct dasd_conf path_conf;
1298         unsigned long flags;
1299         int rc, pos;
1300
1301         opm = 0;
1302         npm = 0;
1303         ppm = 0;
1304         epm = 0;
1305         hpfpm = 0;
1306         cablepm = 0;
1307
1308         for (lpm = 0x80; lpm; lpm >>= 1) {
1309                 if (!(lpm & data->tbvpm))
1310                         continue;
1311                 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1312                 memset(&data->cqr, 0, sizeof(data->cqr));
1313                 data->cqr.cpaddr = &data->ccw;
1314                 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1315                                                      data->rcd_buffer,
1316                                                      lpm);
1317                 if (!rc) {
1318                         switch (dasd_eckd_path_access(data->rcd_buffer,
1319                                                       DASD_ECKD_RCD_DATA_SIZE)
1320                                 ) {
1321                         case 0x02:
1322                                 npm |= lpm;
1323                                 break;
1324                         case 0x03:
1325                                 ppm |= lpm;
1326                                 break;
1327                         }
1328                         opm |= lpm;
1329                 } else if (rc == -EOPNOTSUPP) {
1330                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1331                                         "path verification: No configuration "
1332                                         "data retrieved");
1333                         opm |= lpm;
1334                 } else if (rc == -EAGAIN) {
1335                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1336                                         "path verification: device is stopped,"
1337                                         " try again later");
1338                         epm |= lpm;
1339                 } else {
1340                         dev_warn(&device->cdev->dev,
1341                                  "Reading device feature codes failed "
1342                                  "(rc=%d) for new path %x\n", rc, lpm);
1343                         continue;
1344                 }
1345                 if (verify_fcx_max_data(device, lpm)) {
1346                         opm &= ~lpm;
1347                         npm &= ~lpm;
1348                         ppm &= ~lpm;
1349                         hpfpm |= lpm;
1350                         continue;
1351                 }
1352
1353                 /*
1354                  * save conf_data for comparison after
1355                  * rebuild_device_uid may have changed
1356                  * the original data
1357                  */
1358                 memcpy(&path_rcd_buf, data->rcd_buffer,
1359                        DASD_ECKD_RCD_DATA_SIZE);
1360                 path_conf.data = (void *)&path_rcd_buf;
1361                 path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
1362                 if (dasd_eckd_identify_conf_parts(&path_conf)) {
1363                         path_conf.data = NULL;
1364                         path_conf.len = 0;
1365                         continue;
1366                 }
1367
1368                 /*
1369                  * compare path UID with device UID only if at least
1370                  * one valid path is left
1371                  * in other case the device UID may have changed and
1372                  * the first working path UID will be used as device UID
1373                  */
1374                 if (dasd_path_get_opm(device) &&
1375                     dasd_eckd_compare_path_uid(device, &path_conf)) {
1376                         /*
1377                          * the comparison was not successful
1378                          * rebuild the device UID with at least one
1379                          * known path in case a z/VM hyperswap command
1380                          * has changed the device
1381                          *
1382                          * after this compare again
1383                          *
1384                          * if either the rebuild or the recompare fails
1385                          * the path can not be used
1386                          */
1387                         if (rebuild_device_uid(device, data) ||
1388                             dasd_eckd_compare_path_uid(
1389                                     device, &path_conf)) {
1390                                 dasd_eckd_get_uid_string(&path_conf, print_uid);
1391                                 dev_err(&device->cdev->dev,
1392                                         "The newly added channel path %02X "
1393                                         "will not be used because it leads "
1394                                         "to a different device %s\n",
1395                                         lpm, print_uid);
1396                                 opm &= ~lpm;
1397                                 npm &= ~lpm;
1398                                 ppm &= ~lpm;
1399                                 cablepm |= lpm;
1400                                 continue;
1401                         }
1402                 }
1403
1404                 conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
1405                 if (conf_data) {
1406                         memcpy(conf_data, data->rcd_buffer,
1407                                DASD_ECKD_RCD_DATA_SIZE);
1408                 } else {
1409                         /*
1410                          * the path is operational, but the path config data
1411                          * could not be stored due to a low memory condition;
1412                          * add it to the error path mask and schedule a later
1413                          * path verification so that it can be added again
1414                          */
1415                         epm |= lpm;
1416                 }
1417                 pos = pathmask_to_pos(lpm);
1418                 dasd_eckd_store_conf_data(device, conf_data, pos);
1419
1420                 /*
1421                  * There is a small chance that a path is lost again between
1422                  * the path verification above and the following modification
1423                  * of the device opm mask. We could avoid that race here by
1424                  * using yet another path mask, but we would rather deal with
1425                  * this unlikely situation in dasd_start_IO.
1426                  */
1427                 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1428                 if (!dasd_path_get_opm(device) && opm) {
1429                         dasd_path_set_opm(device, opm);
1430                         dasd_generic_path_operational(device);
1431                 } else {
1432                         dasd_path_add_opm(device, opm);
1433                 }
1434                 dasd_path_add_nppm(device, npm);
1435                 dasd_path_add_ppm(device, ppm);
1436                 if (epm) {
1437                         dasd_path_add_tbvpm(device, epm);
1438                         dasd_device_set_timer(device, 50);
1439                 }
1440                 dasd_path_add_cablepm(device, cablepm);
1441                 dasd_path_add_nohpfpm(device, hpfpm);
1442                 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1443
1444                 dasd_path_create_kobj(device, pos);
1445         }
1446 }
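     /*
      * Illustrative note (not part of the driver logic): opm, npm, ppm, epm,
      * cablepm and hpfpm above are 8-bit channel path masks with one bit per
      * path.  A single-bit mask such as lpm = 0x80 denotes path position 0,
      * 0x40 position 1, and so on; pathmask_to_pos() performs exactly that
      * conversion before the per-path configuration data is stored.
      */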
1447
1448 static void do_pe_handler_work(struct work_struct *work)
1449 {
1450         struct pe_handler_work_data *data;
1451         struct dasd_device *device;
1452
1453         data = container_of(work, struct pe_handler_work_data, worker);
1454         device = data->device;
1455
1456         /* delay path verification until the device has been resumed */
1457         if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1458                 schedule_work(work);
1459                 return;
1460         }
1461         /* check if path verification is already running and delay if so */
1462         if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
1463                 schedule_work(work);
1464                 return;
1465         }
1466
1467         if (data->tbvpm)
1468                 dasd_eckd_path_available_action(device, data);
1469         if (data->fcsecpm)
1470                 dasd_eckd_read_fc_security(device);
1471
1472         clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
1473         dasd_put_device(device);
1474         if (data->isglobal)
1475                 mutex_unlock(&dasd_pe_handler_mutex);
1476         else
1477                 kfree(data);
1478 }
1479
1480 static int dasd_eckd_pe_handler(struct dasd_device *device,
1481                                 __u8 tbvpm, __u8 fcsecpm)
1482 {
1483         struct pe_handler_work_data *data;
1484
1485         data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1486         if (!data) {
1487                 if (mutex_trylock(&dasd_pe_handler_mutex)) {
1488                         data = pe_handler_worker;
1489                         data->isglobal = 1;
1490                 } else {
1491                         return -ENOMEM;
1492                 }
1493         }
1494         INIT_WORK(&data->worker, do_pe_handler_work);
1495         dasd_get_device(device);
1496         data->device = device;
1497         data->tbvpm = tbvpm;
1498         data->fcsecpm = fcsecpm;
1499         schedule_work(&data->worker);
1500         return 0;
1501 }
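     /*
      * Note on the two functions above: if the GFP_ATOMIC allocation in
      * dasd_eckd_pe_handler() fails, the pre-allocated pe_handler_worker is
      * used as a fallback when dasd_pe_handler_mutex can be acquired;
      * do_pe_handler_work() then releases that mutex instead of freeing the
      * work data (data->isglobal).
      */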
1502
1503 static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1504 {
1505         struct dasd_eckd_private *private = device->private;
1506         unsigned long flags;
1507
1508         if (!private->fcx_max_data)
1509                 private->fcx_max_data = get_fcx_max_data(device);
1510         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
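             /*
              * Note: "pm ?: ..." is the GNU ?: shorthand; if no explicit path
              * mask was passed in, all currently not-operational paths are
              * scheduled for verification instead.
              */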
1511         dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1512         dasd_schedule_device_bh(device);
1513         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1514 }
1515
1516 static int dasd_eckd_read_features(struct dasd_device *device)
1517 {
1518         struct dasd_eckd_private *private = device->private;
1519         struct dasd_psf_prssd_data *prssdp;
1520         struct dasd_rssd_features *features;
1521         struct dasd_ccw_req *cqr;
1522         struct ccw1 *ccw;
1523         int rc;
1524
1525         memset(&private->features, 0, sizeof(struct dasd_rssd_features));
1526         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1527                                    (sizeof(struct dasd_psf_prssd_data) +
1528                                     sizeof(struct dasd_rssd_features)),
1529                                    device, NULL);
1530         if (IS_ERR(cqr)) {
1531                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1532                                 "allocate initialization request");
1533                 return PTR_ERR(cqr);
1534         }
1535         cqr->startdev = device;
1536         cqr->memdev = device;
1537         cqr->block = NULL;
1538         cqr->retries = 256;
1539         cqr->expires = 10 * HZ;
1540
1541         /* Prepare for Read Subsystem Data */
1542         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1543         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1544         prssdp->order = PSF_ORDER_PRSSD;
1545         prssdp->suborder = 0x41;        /* Read Feature Codes */
1546         /* all other bytes of prssdp must be zero */
1547
1548         ccw = cqr->cpaddr;
1549         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1550         ccw->count = sizeof(struct dasd_psf_prssd_data);
1551         ccw->flags |= CCW_FLAG_CC;
1552         ccw->cda = (__u32)virt_to_phys(prssdp);
1553
1554         /* Read Subsystem Data - feature codes */
1555         features = (struct dasd_rssd_features *) (prssdp + 1);
1556         memset(features, 0, sizeof(struct dasd_rssd_features));
1557
1558         ccw++;
1559         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1560         ccw->count = sizeof(struct dasd_rssd_features);
1561         ccw->cda = (__u32)virt_to_phys(features);
1562
1563         cqr->buildclk = get_tod_clock();
1564         cqr->status = DASD_CQR_FILLED;
1565         rc = dasd_sleep_on(cqr);
1566         if (rc == 0) {
1567                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1568                 features = (struct dasd_rssd_features *) (prssdp + 1);
1569                 memcpy(&private->features, features,
1570                        sizeof(struct dasd_rssd_features));
1571         } else
1572                 dev_warn(&device->cdev->dev, "Reading device feature codes"
1573                          " failed with rc=%d\n", rc);
1574         dasd_sfree_request(cqr, cqr->memdev);
1575         return rc;
1576 }
1577
1578 /* Read Volume Information - Volume Storage Query */
1579 static int dasd_eckd_read_vol_info(struct dasd_device *device)
1580 {
1581         struct dasd_eckd_private *private = device->private;
1582         struct dasd_psf_prssd_data *prssdp;
1583         struct dasd_rssd_vsq *vsq;
1584         struct dasd_ccw_req *cqr;
1585         struct ccw1 *ccw;
1586         int useglobal;
1587         int rc;
1588
1589         /* This command cannot be executed on an alias device */
1590         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1591             private->uid.type == UA_HYPER_PAV_ALIAS)
1592                 return 0;
1593
1594         useglobal = 0;
1595         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1596                                    sizeof(*prssdp) + sizeof(*vsq), device, NULL);
1597         if (IS_ERR(cqr)) {
1598                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1599                                 "Could not allocate initialization request");
1600                 mutex_lock(&dasd_vol_info_mutex);
1601                 useglobal = 1;
1602                 cqr = &dasd_vol_info_req->cqr;
1603                 memset(cqr, 0, sizeof(*cqr));
1604                 memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
1605                 cqr->cpaddr = &dasd_vol_info_req->ccw;
1606                 cqr->data = &dasd_vol_info_req->data;
1607                 cqr->magic = DASD_ECKD_MAGIC;
1608         }
1609
1610         /* Prepare for Read Subsystem Data */
1611         prssdp = cqr->data;
1612         prssdp->order = PSF_ORDER_PRSSD;
1613         prssdp->suborder = PSF_SUBORDER_VSQ;    /* Volume Storage Query */
1614         prssdp->lss = private->conf.ned->ID;
1615         prssdp->volume = private->conf.ned->unit_addr;
1616
1617         ccw = cqr->cpaddr;
1618         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1619         ccw->count = sizeof(*prssdp);
1620         ccw->flags |= CCW_FLAG_CC;
1621         ccw->cda = (__u32)virt_to_phys(prssdp);
1622
1623         /* Read Subsystem Data - Volume Storage Query */
1624         vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
1625         memset(vsq, 0, sizeof(*vsq));
1626
1627         ccw++;
1628         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1629         ccw->count = sizeof(*vsq);
1630         ccw->flags |= CCW_FLAG_SLI;
1631         ccw->cda = (__u32)virt_to_phys(vsq);
1632
1633         cqr->buildclk = get_tod_clock();
1634         cqr->status = DASD_CQR_FILLED;
1635         cqr->startdev = device;
1636         cqr->memdev = device;
1637         cqr->block = NULL;
1638         cqr->retries = 256;
1639         cqr->expires = device->default_expires * HZ;
1640         /* The command might not be supported. Suppress the error output */
1641         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1642
1643         rc = dasd_sleep_on_interruptible(cqr);
1644         if (rc == 0) {
1645                 memcpy(&private->vsq, vsq, sizeof(*vsq));
1646         } else {
1647                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1648                                 "Reading the volume storage information failed with rc=%d", rc);
1649         }
1650
1651         if (useglobal)
1652                 mutex_unlock(&dasd_vol_info_mutex);
1653         else
1654                 dasd_sfree_request(cqr, cqr->memdev);
1655
1656         return rc;
1657 }
1658
1659 static int dasd_eckd_is_ese(struct dasd_device *device)
1660 {
1661         struct dasd_eckd_private *private = device->private;
1662
1663         return private->vsq.vol_info.ese;
1664 }
1665
1666 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1667 {
1668         struct dasd_eckd_private *private = device->private;
1669
1670         return private->vsq.extent_pool_id;
1671 }
1672
1673 /*
1674  * This value represents the total amount of available space. As more space is
1675  * allocated by ESE volumes, this value will decrease.
1676  * The data for this value is therefore updated on any call.
1677  */
1678 static int dasd_eckd_space_configured(struct dasd_device *device)
1679 {
1680         struct dasd_eckd_private *private = device->private;
1681         int rc;
1682
1683         rc = dasd_eckd_read_vol_info(device);
1684
1685         return rc ? : private->vsq.space_configured;
1686 }
1687
1688 /*
1689  * The value of space allocated by an ESE volume may have changed and is
1690  * therefore updated on any call.
1691  */
1692 static int dasd_eckd_space_allocated(struct dasd_device *device)
1693 {
1694         struct dasd_eckd_private *private = device->private;
1695         int rc;
1696
1697         rc = dasd_eckd_read_vol_info(device);
1698
1699         return rc ? : private->vsq.space_allocated;
1700 }
1701
1702 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1703 {
1704         struct dasd_eckd_private *private = device->private;
1705
1706         return private->vsq.logical_capacity;
1707 }
1708
1709 static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1710 {
1711         struct ext_pool_exhaust_work_data *data;
1712         struct dasd_device *device;
1713         struct dasd_device *base;
1714
1715         data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1716         device = data->device;
1717         base = data->base;
1718
1719         if (!base)
1720                 base = device;
1721         if (dasd_eckd_space_configured(base) != 0) {
1722                 dasd_generic_space_avail(device);
1723         } else {
1724                 dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1725                 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1726         }
1727
1728         dasd_put_device(device);
1729         kfree(data);
1730 }
1731
1732 static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1733                                       struct dasd_ccw_req *cqr)
1734 {
1735         struct ext_pool_exhaust_work_data *data;
1736
1737         data = kzalloc(sizeof(*data), GFP_ATOMIC);
1738         if (!data)
1739                 return -ENOMEM;
1740         INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1741         dasd_get_device(device);
1742         data->device = device;
1743
1744         if (cqr->block)
1745                 data->base = cqr->block->base;
1746         else if (cqr->basedev)
1747                 data->base = cqr->basedev;
1748         else
1749                 data->base = NULL;
1750
1751         schedule_work(&data->worker);
1752
1753         return 0;
1754 }
1755
1756 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1757                                         struct dasd_rssd_lcq *lcq)
1758 {
1759         struct dasd_eckd_private *private = device->private;
1760         int pool_id = dasd_eckd_ext_pool_id(device);
1761         struct dasd_ext_pool_sum eps;
1762         int i;
1763
1764         for (i = 0; i < lcq->pool_count; i++) {
1765                 eps = lcq->ext_pool_sum[i];
1766                 if (eps.pool_id == pool_id) {
1767                         memcpy(&private->eps, &eps,
1768                                sizeof(struct dasd_ext_pool_sum));
1769                 }
1770         }
1771 }
1772
1773 /* Read Extent Pool Information - Logical Configuration Query */
1774 static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
1775 {
1776         struct dasd_eckd_private *private = device->private;
1777         struct dasd_psf_prssd_data *prssdp;
1778         struct dasd_rssd_lcq *lcq;
1779         struct dasd_ccw_req *cqr;
1780         struct ccw1 *ccw;
1781         int rc;
1782
1783         /* This command cannot be executed on an alias device */
1784         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1785             private->uid.type == UA_HYPER_PAV_ALIAS)
1786                 return 0;
1787
1788         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1789                                    sizeof(*prssdp) + sizeof(*lcq), device, NULL);
1790         if (IS_ERR(cqr)) {
1791                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1792                                 "Could not allocate initialization request");
1793                 return PTR_ERR(cqr);
1794         }
1795
1796         /* Prepare for Read Subsystem Data */
1797         prssdp = cqr->data;
1798         memset(prssdp, 0, sizeof(*prssdp));
1799         prssdp->order = PSF_ORDER_PRSSD;
1800         prssdp->suborder = PSF_SUBORDER_LCQ;    /* Logical Configuration Query */
1801
1802         ccw = cqr->cpaddr;
1803         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1804         ccw->count = sizeof(*prssdp);
1805         ccw->flags |= CCW_FLAG_CC;
1806         ccw->cda = (__u32)virt_to_phys(prssdp);
1807
1808         lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
1809         memset(lcq, 0, sizeof(*lcq));
1810
1811         ccw++;
1812         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1813         ccw->count = sizeof(*lcq);
1814         ccw->flags |= CCW_FLAG_SLI;
1815         ccw->cda = (__u32)virt_to_phys(lcq);
1816
1817         cqr->buildclk = get_tod_clock();
1818         cqr->status = DASD_CQR_FILLED;
1819         cqr->startdev = device;
1820         cqr->memdev = device;
1821         cqr->block = NULL;
1822         cqr->retries = 256;
1823         cqr->expires = device->default_expires * HZ;
1824         /* The command might not be supported. Suppress the error output */
1825         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1826
1827         rc = dasd_sleep_on_interruptible(cqr);
1828         if (rc == 0) {
1829                 dasd_eckd_cpy_ext_pool_data(device, lcq);
1830         } else {
1831                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1832                                 "Reading the logical configuration failed with rc=%d", rc);
1833         }
1834
1835         dasd_sfree_request(cqr, cqr->memdev);
1836
1837         return rc;
1838 }
1839
1840 /*
1841  * Depending on the device type, the extent size is specified either as
1842  * cylinders per extent (CKD) or size per extent (FBA).
1843  * A 1 GB extent size corresponds to 1113 cylinders, and 16 MB to 21 cylinders.
1844  */
1845 static int dasd_eckd_ext_size(struct dasd_device *device)
1846 {
1847         struct dasd_eckd_private *private = device->private;
1848         struct dasd_ext_pool_sum eps = private->eps;
1849
1850         if (!eps.flags.extent_size_valid)
1851                 return 0;
1852         if (eps.extent_size.size_1G)
1853                 return 1113;
1854         if (eps.extent_size.size_16M)
1855                 return 21;
1856
1857         return 0;
1858 }
1859
1860 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1861 {
1862         struct dasd_eckd_private *private = device->private;
1863
1864         return private->eps.warn_thrshld;
1865 }
1866
1867 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1868 {
1869         struct dasd_eckd_private *private = device->private;
1870
1871         return private->eps.flags.capacity_at_warnlevel;
1872 }
1873
1874 /*
1875  * Extent Pool out of space
1876  */
1877 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1878 {
1879         struct dasd_eckd_private *private = device->private;
1880
1881         return private->eps.flags.pool_oos;
1882 }
1883
1884 /*
1885  * Build CP for Perform Subsystem Function - SSC.
1886  */
1887 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1888                                                     int enable_pav)
1889 {
1890         struct dasd_ccw_req *cqr;
1891         struct dasd_psf_ssc_data *psf_ssc_data;
1892         struct ccw1 *ccw;
1893
1894         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1895                                   sizeof(struct dasd_psf_ssc_data),
1896                                    device, NULL);
1897
1898         if (IS_ERR(cqr)) {
1899                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1900                            "Could not allocate PSF-SSC request");
1901                 return cqr;
1902         }
1903         psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1904         psf_ssc_data->order = PSF_ORDER_SSC;
1905         psf_ssc_data->suborder = 0xc0;
1906         if (enable_pav) {
1907                 psf_ssc_data->suborder |= 0x08;
1908                 psf_ssc_data->reserved[0] = 0x88;
1909         }
1910         ccw = cqr->cpaddr;
1911         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1912         ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
1913         ccw->count = 66;
1914
1915         cqr->startdev = device;
1916         cqr->memdev = device;
1917         cqr->block = NULL;
1918         cqr->retries = 256;
1919         cqr->expires = 10*HZ;
1920         cqr->buildclk = get_tod_clock();
1921         cqr->status = DASD_CQR_FILLED;
1922         return cqr;
1923 }
1924
1925 /*
1926  * Perform Subsystem Function.
1927  * It is necessary to trigger CIO for channel revalidation, since this
1928  * call might change the behaviour of DASD devices.
1929  */
1930 static int
1931 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1932                   unsigned long flags)
1933 {
1934         struct dasd_ccw_req *cqr;
1935         int rc;
1936
1937         cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1938         if (IS_ERR(cqr))
1939                 return PTR_ERR(cqr);
1940
1941         /*
1942          * set flags, e.g. turn on failfast, to prevent blocking;
1943          * the calling function should handle failed requests
1944          */
1945         cqr->flags |= flags;
1946
1947         rc = dasd_sleep_on(cqr);
1948         if (!rc)
1949                 /* trigger CIO to reprobe devices */
1950                 css_schedule_reprobe();
1951         else if (cqr->intrc == -EAGAIN)
1952                 rc = -EAGAIN;
1953
1954         dasd_sfree_request(cqr, cqr->memdev);
1955         return rc;
1956 }
1957
1958 /*
1959  * Validate the storage server of the current device.
1960  */
1961 static int dasd_eckd_validate_server(struct dasd_device *device,
1962                                      unsigned long flags)
1963 {
1964         struct dasd_eckd_private *private = device->private;
1965         int enable_pav, rc;
1966
1967         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1968             private->uid.type == UA_HYPER_PAV_ALIAS)
1969                 return 0;
1970         if (dasd_nopav || MACHINE_IS_VM)
1971                 enable_pav = 0;
1972         else
1973                 enable_pav = 1;
1974         rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1975
1976         /* maybe the requested feature is not available on the server,
1977          * therefore just report the error and go ahead */
1978         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1979                         "returned rc=%d", private->uid.ssid, rc);
1980         return rc;
1981 }
1982
1983 /*
1984  * worker to do a validate server in case of a lost pathgroup
1985  */
1986 static void dasd_eckd_do_validate_server(struct work_struct *work)
1987 {
1988         struct dasd_device *device = container_of(work, struct dasd_device,
1989                                                   kick_validate);
1990         unsigned long flags = 0;
1991
1992         set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1993         if (dasd_eckd_validate_server(device, flags)
1994             == -EAGAIN) {
1995                 /* schedule worker again if failed */
1996                 schedule_work(&device->kick_validate);
1997                 return;
1998         }
1999
2000         dasd_put_device(device);
2001 }
2002
2003 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
2004 {
2005         dasd_get_device(device);
2006         /* exit if device not online or in offline processing */
2007         if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
2008            device->state < DASD_STATE_ONLINE) {
2009                 dasd_put_device(device);
2010                 return;
2011         }
2012         /* queue call to do_validate_server to the kernel event daemon. */
2013         if (!schedule_work(&device->kick_validate))
2014                 dasd_put_device(device);
2015 }
2016
2017 /*
2018  * return whether the device is the copy relation primary (also true if no copy relation is active)
2019  */
2020 static int dasd_device_is_primary(struct dasd_device *device)
2021 {
2022         if (!device->copy)
2023                 return 1;
2024
2025         if (device->copy->active->device == device)
2026                 return 1;
2027
2028         return 0;
2029 }
2030
2031 static int dasd_eckd_alloc_block(struct dasd_device *device)
2032 {
2033         struct dasd_block *block;
2034         struct dasd_uid temp_uid;
2035
2036         if (!dasd_device_is_primary(device))
2037                 return 0;
2038
2039         dasd_eckd_get_uid(device, &temp_uid);
2040         if (temp_uid.type == UA_BASE_DEVICE) {
2041                 block = dasd_alloc_block();
2042                 if (IS_ERR(block)) {
2043                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
2044                                         "could not allocate dasd block structure");
2045                         return PTR_ERR(block);
2046                 }
2047                 device->block = block;
2048                 block->base = device;
2049         }
2050         return 0;
2051 }
2052
2053 static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
2054 {
2055         struct dasd_eckd_private *private = device->private;
2056
2057         return private->rdc_data.facilities.PPRC_enabled;
2058 }
2059
2060 /*
2061  * Check device characteristics.
2062  * If the device is accessible using the ECKD discipline, the device is enabled.
2063  */
2064 static int
2065 dasd_eckd_check_characteristics(struct dasd_device *device)
2066 {
2067         struct dasd_eckd_private *private = device->private;
2068         int rc, i;
2069         int readonly;
2070         unsigned long value;
2071
2072         /* setup work queue for validate server */
2073         INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
2074         /* setup work queue for summary unit check */
2075         INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
2076
2077         if (!ccw_device_is_pathgroup(device->cdev)) {
2078                 dev_warn(&device->cdev->dev,
2079                          "A channel path group could not be established\n");
2080                 return -EIO;
2081         }
2082         if (!ccw_device_is_multipath(device->cdev)) {
2083                 dev_info(&device->cdev->dev,
2084                          "The DASD is not operating in multipath mode\n");
2085         }
2086         if (!private) {
2087                 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
2088                 if (!private) {
2089                         dev_warn(&device->cdev->dev,
2090                                  "Allocating memory for private DASD data "
2091                                  "failed\n");
2092                         return -ENOMEM;
2093                 }
2094                 device->private = private;
2095         } else {
2096                 memset(private, 0, sizeof(*private));
2097         }
2098         /* Invalidate status of initial analysis. */
2099         private->init_cqr_status = -1;
2100         /* Set default cache operations. */
2101         private->attrib.operation = DASD_NORMAL_CACHE;
2102         private->attrib.nr_cyl = 0;
2103
2104         /* Read Configuration Data */
2105         rc = dasd_eckd_read_conf(device);
2106         if (rc)
2107                 goto out_err1;
2108
2109         /* set some default values */
2110         device->default_expires = DASD_EXPIRES;
2111         device->default_retries = DASD_RETRIES;
2112         device->path_thrhld = DASD_ECKD_PATH_THRHLD;
2113         device->path_interval = DASD_ECKD_PATH_INTERVAL;
2114         device->aq_timeouts = DASD_RETRIES_MAX;
2115
2116         if (private->conf.gneq) {
2117                 value = 1;
2118                 for (i = 0; i < private->conf.gneq->timeout.value; i++)
2119                         value = 10 * value;
2120                 value = value * private->conf.gneq->timeout.number;
2121                 /* do not accept useless values */
2122                 if (value != 0 && value <= DASD_EXPIRES_MAX)
2123                         device->default_expires = value;
2124         }
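             /*
              * Worked example (illustrative values): timeout.number = 3 and
              * timeout.value = 2 yield 3 * 10^2 = 300 as default_expires,
              * which is accepted as long as it is non-zero and does not
              * exceed DASD_EXPIRES_MAX (it is later multiplied by HZ when
              * used as a request expiry).
              */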
2125
2126         /* Read Device Characteristics */
2127         rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2128                                          &private->rdc_data, 64);
2129         if (rc) {
2130                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2131                                 "Read device characteristic failed, rc=%d", rc);
2132                 goto out_err1;
2133         }
2134
2135         /* setup PPRC for device from devmap */
2136         rc = dasd_devmap_set_device_copy_relation(device->cdev,
2137                                                   dasd_eckd_pprc_enabled(device));
2138         if (rc) {
2139                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2140                                 "copy relation setup failed, rc=%d", rc);
2141                 goto out_err1;
2142         }
2143
2144         /* check if a block device is needed and allocate one if so */
2145         rc = dasd_eckd_alloc_block(device);
2146         if (rc)
2147                 goto out_err1;
2148
2149         /* register lcu with alias handling, enable PAV */
2150         rc = dasd_alias_make_device_known_to_lcu(device);
2151         if (rc)
2152                 goto out_err2;
2153
2154         dasd_eckd_validate_server(device, 0);
2155
2156         /* device may report different configuration data after LCU setup */
2157         rc = dasd_eckd_read_conf(device);
2158         if (rc)
2159                 goto out_err3;
2160
2161         dasd_eckd_read_fc_security(device);
2162         dasd_path_create_kobjects(device);
2163
2164         /* Read Feature Codes */
2165         dasd_eckd_read_features(device);
2166
2167         /* Read Volume Information */
2168         dasd_eckd_read_vol_info(device);
2169
2170         /* Read Extent Pool Information */
2171         dasd_eckd_read_ext_pool_info(device);
2172
2173         if ((device->features & DASD_FEATURE_USERAW) &&
2174             !(private->rdc_data.facilities.RT_in_LR)) {
2175                 dev_err(&device->cdev->dev, "The storage server does not "
2176                         "support raw-track access\n");
2177                 rc = -EINVAL;
2178                 goto out_err3;
2179         }
2180
2181         /* determine the valid number of cylinders */
2182         if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2183             private->rdc_data.long_no_cyl)
2184                 private->real_cyl = private->rdc_data.long_no_cyl;
2185         else
2186                 private->real_cyl = private->rdc_data.no_cyl;
2187
2188         private->fcx_max_data = get_fcx_max_data(device);
2189
2190         readonly = dasd_device_is_ro(device);
2191         if (readonly)
2192                 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2193
2194         dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2195                  "with %d cylinders, %d heads, %d sectors%s\n",
2196                  private->rdc_data.dev_type,
2197                  private->rdc_data.dev_model,
2198                  private->rdc_data.cu_type,
2199                  private->rdc_data.cu_model.model,
2200                  private->real_cyl,
2201                  private->rdc_data.trk_per_cyl,
2202                  private->rdc_data.sec_per_trk,
2203                  readonly ? ", read-only device" : "");
2204         return 0;
2205
2206 out_err3:
2207         dasd_alias_disconnect_device_from_lcu(device);
2208 out_err2:
2209         dasd_free_block(device->block);
2210         device->block = NULL;
2211 out_err1:
2212         dasd_eckd_clear_conf_data(device);
2213         dasd_path_remove_kobjects(device);
2214         kfree(device->private);
2215         device->private = NULL;
2216         return rc;
2217 }
2218
2219 static void dasd_eckd_uncheck_device(struct dasd_device *device)
2220 {
2221         struct dasd_eckd_private *private = device->private;
2222
2223         if (!private)
2224                 return;
2225
2226         dasd_alias_disconnect_device_from_lcu(device);
2227         private->conf.ned = NULL;
2228         private->conf.sneq = NULL;
2229         private->conf.vdsneq = NULL;
2230         private->conf.gneq = NULL;
2231         dasd_eckd_clear_conf_data(device);
2232         dasd_path_remove_kobjects(device);
2233 }
2234
2235 static struct dasd_ccw_req *
2236 dasd_eckd_analysis_ccw(struct dasd_device *device)
2237 {
2238         struct dasd_eckd_private *private = device->private;
2239         struct eckd_count *count_data;
2240         struct LO_eckd_data *LO_data;
2241         struct dasd_ccw_req *cqr;
2242         struct ccw1 *ccw;
2243         int cplength, datasize;
2244         int i;
2245
2246         cplength = 8;
2247         datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
2248         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2249                                    NULL);
2250         if (IS_ERR(cqr))
2251                 return cqr;
2252         ccw = cqr->cpaddr;
2253         /* Define extent for the first 2 tracks. */
2254         define_extent(ccw++, cqr->data, 0, 1,
2255                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2256         LO_data = cqr->data + sizeof(struct DE_eckd_data);
2257         /* Locate record for the first 4 records on track 0. */
2258         ccw[-1].flags |= CCW_FLAG_CC;
2259         locate_record(ccw++, LO_data++, 0, 0, 4,
2260                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2261
2262         count_data = private->count_area;
2263         for (i = 0; i < 4; i++) {
2264                 ccw[-1].flags |= CCW_FLAG_CC;
2265                 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2266                 ccw->flags = 0;
2267                 ccw->count = 8;
2268                 ccw->cda = (__u32)virt_to_phys(count_data);
2269                 ccw++;
2270                 count_data++;
2271         }
2272
2273         /* Locate record for the first record on track 1. */
2274         ccw[-1].flags |= CCW_FLAG_CC;
2275         locate_record(ccw++, LO_data++, 1, 0, 1,
2276                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2277         /* Read count ccw. */
2278         ccw[-1].flags |= CCW_FLAG_CC;
2279         ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2280         ccw->flags = 0;
2281         ccw->count = 8;
2282         ccw->cda = (__u32)virt_to_phys(count_data);
2283
2284         cqr->block = NULL;
2285         cqr->startdev = device;
2286         cqr->memdev = device;
2287         cqr->retries = 255;
2288         cqr->buildclk = get_tod_clock();
2289         cqr->status = DASD_CQR_FILLED;
2290         /* Set flags to suppress output for expected errors */
2291         set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2292
2293         return cqr;
2294 }
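     /*
      * Summary (no new logic): the analysis request built above reads four
      * count fields from track 0 and one from track 1 into
      * private->count_area[0..4]; dasd_eckd_end_analysis() below inspects
      * these five entries to distinguish the compatible disk layout (cdl)
      * from the linux disk layout (ldl).
      */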
2295
2296 /* differentiate between 'no record found' and any other error */
2297 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2298 {
2299         char *sense;
2300         if (init_cqr->status == DASD_CQR_DONE)
2301                 return INIT_CQR_OK;
2302         else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2303                  init_cqr->status == DASD_CQR_FAILED) {
2304                 sense = dasd_get_sense(&init_cqr->irb);
2305                 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2306                         return INIT_CQR_UNFORMATTED;
2307                 else
2308                         return INIT_CQR_ERROR;
2309         } else
2310                 return INIT_CQR_ERROR;
2311 }
2312
2313 /*
2314  * This is the callback function for the init_analysis cqr. It saves
2315  * the status of the initial analysis ccw before it frees it and kicks
2316  * the device to continue the startup sequence. This will call
2317  * dasd_eckd_do_analysis again (if the device has not been marked
2318  * for deletion in the meantime).
2319  */
2320 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2321                                         void *data)
2322 {
2323         struct dasd_device *device = init_cqr->startdev;
2324         struct dasd_eckd_private *private = device->private;
2325
2326         private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2327         dasd_sfree_request(init_cqr, device);
2328         dasd_kick_device(device);
2329 }
2330
2331 static int dasd_eckd_start_analysis(struct dasd_block *block)
2332 {
2333         struct dasd_ccw_req *init_cqr;
2334
2335         init_cqr = dasd_eckd_analysis_ccw(block->base);
2336         if (IS_ERR(init_cqr))
2337                 return PTR_ERR(init_cqr);
2338         init_cqr->callback = dasd_eckd_analysis_callback;
2339         init_cqr->callback_data = NULL;
2340         init_cqr->expires = 5*HZ;
2341         /* first try without ERP, so we can later handle unformatted
2342          * devices as a special case
2343          */
2344         clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2345         init_cqr->retries = 0;
2346         dasd_add_request_head(init_cqr);
2347         return -EAGAIN;
2348 }
2349
2350 static int dasd_eckd_end_analysis(struct dasd_block *block)
2351 {
2352         struct dasd_device *device = block->base;
2353         struct dasd_eckd_private *private = device->private;
2354         struct eckd_count *count_area;
2355         unsigned int sb, blk_per_trk;
2356         int status, i;
2357         struct dasd_ccw_req *init_cqr;
2358
2359         status = private->init_cqr_status;
2360         private->init_cqr_status = -1;
2361         if (status == INIT_CQR_ERROR) {
2362                 /* try again, this time with full ERP */
2363                 init_cqr = dasd_eckd_analysis_ccw(device);
2364                 dasd_sleep_on(init_cqr);
2365                 status = dasd_eckd_analysis_evaluation(init_cqr);
2366                 dasd_sfree_request(init_cqr, device);
2367         }
2368
2369         if (device->features & DASD_FEATURE_USERAW) {
2370                 block->bp_block = DASD_RAW_BLOCKSIZE;
2371                 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2372                 block->s2b_shift = 3;
2373                 goto raw;
2374         }
2375
2376         if (status == INIT_CQR_UNFORMATTED) {
2377                 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2378                 return -EMEDIUMTYPE;
2379         } else if (status == INIT_CQR_ERROR) {
2380                 dev_err(&device->cdev->dev,
2381                         "Detecting the DASD disk layout failed because "
2382                         "of an I/O error\n");
2383                 return -EIO;
2384         }
2385
2386         private->uses_cdl = 1;
2387         /* Check Track 0 for Compatible Disk Layout */
2388         count_area = NULL;
2389         for (i = 0; i < 3; i++) {
2390                 if (private->count_area[i].kl != 4 ||
2391                     private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2392                     private->count_area[i].cyl != 0 ||
2393                     private->count_area[i].head != count_area_head[i] ||
2394                     private->count_area[i].record != count_area_rec[i]) {
2395                         private->uses_cdl = 0;
2396                         break;
2397                 }
2398         }
2399         if (i == 3)
2400                 count_area = &private->count_area[3];
2401
2402         if (private->uses_cdl == 0) {
2403                 for (i = 0; i < 5; i++) {
2404                         if ((private->count_area[i].kl != 0) ||
2405                             (private->count_area[i].dl !=
2406                              private->count_area[0].dl) ||
2407                             private->count_area[i].cyl !=  0 ||
2408                             private->count_area[i].head != count_area_head[i] ||
2409                             private->count_area[i].record != count_area_rec[i])
2410                                 break;
2411                 }
2412                 if (i == 5)
2413                         count_area = &private->count_area[0];
2414         } else {
2415                 if (private->count_area[3].record == 1)
2416                         dev_warn(&device->cdev->dev,
2417                                  "Track 0 has no records following the VTOC\n");
2418         }
2419
2420         if (count_area != NULL && count_area->kl == 0) {
2421                 /* we found nothing violating our disk layout */
2422                 if (dasd_check_blocksize(count_area->dl) == 0)
2423                         block->bp_block = count_area->dl;
2424         }
2425         if (block->bp_block == 0) {
2426                 dev_warn(&device->cdev->dev,
2427                          "The disk layout of the DASD is not supported\n");
2428                 return -EMEDIUMTYPE;
2429         }
2430         block->s2b_shift = 0;   /* bits to shift 512 to get a block */
2431         for (sb = 512; sb < block->bp_block; sb = sb << 1)
2432                 block->s2b_shift++;
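             /*
              * Worked example (illustrative): for bp_block = 4096 the loop
              * runs for sb = 512, 1024 and 2048, so s2b_shift becomes 3,
              * i.e. eight 512-byte sectors per block.
              */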
2433
2434         blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2435
2436 raw:
2437         block->blocks = ((unsigned long) private->real_cyl *
2438                           private->rdc_data.trk_per_cyl *
2439                           blk_per_trk);
2440
2441         dev_info(&device->cdev->dev,
2442                  "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2443                  "%s\n", (block->bp_block >> 10),
2444                  (((unsigned long) private->real_cyl *
2445                    private->rdc_data.trk_per_cyl *
2446                    blk_per_trk * (block->bp_block >> 9)) >> 1),
2447                  ((blk_per_trk * block->bp_block) >> 10),
2448                  private->uses_cdl ?
2449                  "compatible disk layout" : "linux disk layout");
2450
2451         return 0;
2452 }
2453
2454 static int dasd_eckd_do_analysis(struct dasd_block *block)
2455 {
2456         struct dasd_eckd_private *private = block->base->private;
2457
2458         if (private->init_cqr_status < 0)
2459                 return dasd_eckd_start_analysis(block);
2460         else
2461                 return dasd_eckd_end_analysis(block);
2462 }
2463
2464 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2465 {
2466         return dasd_alias_add_device(device);
2467 };
2468
2469 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2470 {
2471         if (cancel_work_sync(&device->reload_device))
2472                 dasd_put_device(device);
2473         if (cancel_work_sync(&device->kick_validate))
2474                 dasd_put_device(device);
2475
2476         return 0;
2477 };
2478
2479 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2480 {
2481         return dasd_alias_remove_device(device);
2482 };
2483
2484 static int
2485 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2486 {
2487         struct dasd_eckd_private *private = block->base->private;
2488
2489         if (dasd_check_blocksize(block->bp_block) == 0) {
2490                 geo->sectors = recs_per_track(&private->rdc_data,
2491                                               0, block->bp_block);
2492         }
2493         geo->cylinders = private->rdc_data.no_cyl;
2494         geo->heads = private->rdc_data.trk_per_cyl;
2495         return 0;
2496 }
2497
2498 /*
2499  * Build the TCW request for the format check
2500  */
2501 static struct dasd_ccw_req *
2502 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2503                           int enable_pav, struct eckd_count *fmt_buffer,
2504                           int rpt)
2505 {
2506         struct dasd_eckd_private *start_priv;
2507         struct dasd_device *startdev = NULL;
2508         struct tidaw *last_tidaw = NULL;
2509         struct dasd_ccw_req *cqr;
2510         struct itcw *itcw;
2511         int itcw_size;
2512         int count;
2513         int rc;
2514         int i;
2515
2516         if (enable_pav)
2517                 startdev = dasd_alias_get_start_dev(base);
2518
2519         if (!startdev)
2520                 startdev = base;
2521
2522         start_priv = startdev->private;
2523
2524         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2525
2526         /*
2527          * we're adding 'count' tidaws to the itcw;
2528          * calculate the corresponding itcw_size
2529          */
2530         itcw_size = itcw_calc_size(0, count, 0);
2531
2532         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2533         if (IS_ERR(cqr))
2534                 return cqr;
2535
2536         start_priv->count++;
2537
2538         itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2539         if (IS_ERR(itcw)) {
2540                 rc = -EINVAL;
2541                 goto out_err;
2542         }
2543
2544         cqr->cpaddr = itcw_get_tcw(itcw);
2545         rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2546                           DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2547                           sizeof(struct eckd_count),
2548                           count * sizeof(struct eckd_count), 0, rpt);
2549         if (rc)
2550                 goto out_err;
2551
2552         for (i = 0; i < count; i++) {
2553                 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2554                                             sizeof(struct eckd_count));
2555                 if (IS_ERR(last_tidaw)) {
2556                         rc = -EINVAL;
2557                         goto out_err;
2558                 }
2559         }
2560
2561         last_tidaw->flags |= TIDAW_FLAGS_LAST;
2562         itcw_finalize(itcw);
2563
2564         cqr->cpmode = 1;
2565         cqr->startdev = startdev;
2566         cqr->memdev = startdev;
2567         cqr->basedev = base;
2568         cqr->retries = startdev->default_retries;
2569         cqr->expires = startdev->default_expires * HZ;
2570         cqr->buildclk = get_tod_clock();
2571         cqr->status = DASD_CQR_FILLED;
2572         /* Set flags to suppress output for expected errors */
2573         set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2574         set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2575
2576         return cqr;
2577
2578 out_err:
2579         dasd_sfree_request(cqr, startdev);
2580
2581         return ERR_PTR(rc);
2582 }
2583
2584 /*
2585  * Build the CCW request for the format check
2586  */
2587 static struct dasd_ccw_req *
2588 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2589                       int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2590 {
2591         struct dasd_eckd_private *start_priv;
2592         struct dasd_eckd_private *base_priv;
2593         struct dasd_device *startdev = NULL;
2594         struct dasd_ccw_req *cqr;
2595         struct ccw1 *ccw;
2596         void *data;
2597         int cplength, datasize;
2598         int use_prefix;
2599         int count;
2600         int i;
2601
2602         if (enable_pav)
2603                 startdev = dasd_alias_get_start_dev(base);
2604
2605         if (!startdev)
2606                 startdev = base;
2607
2608         start_priv = startdev->private;
2609         base_priv = base->private;
2610
2611         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2612
2613         use_prefix = base_priv->features.feature[8] & 0x01;
2614
2615         if (use_prefix) {
2616                 cplength = 1;
2617                 datasize = sizeof(struct PFX_eckd_data);
2618         } else {
2619                 cplength = 2;
2620                 datasize = sizeof(struct DE_eckd_data) +
2621                         sizeof(struct LO_eckd_data);
2622         }
2623         cplength += count;
2624
2625         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2626         if (IS_ERR(cqr))
2627                 return cqr;
2628
2629         start_priv->count++;
2630         data = cqr->data;
2631         ccw = cqr->cpaddr;
2632
2633         if (use_prefix) {
2634                 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2635                            DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2636                            count, 0, 0);
2637         } else {
2638                 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2639                               DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2640
2641                 data += sizeof(struct DE_eckd_data);
2642                 ccw[-1].flags |= CCW_FLAG_CC;
2643
2644                 locate_record(ccw++, data, fdata->start_unit, 0, count,
2645                               DASD_ECKD_CCW_READ_COUNT, base, 0);
2646         }
2647
2648         for (i = 0; i < count; i++) {
2649                 ccw[-1].flags |= CCW_FLAG_CC;
2650                 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2651                 ccw->flags = CCW_FLAG_SLI;
2652                 ccw->count = 8;
2653                 ccw->cda = (__u32)virt_to_phys(fmt_buffer);
2654                 ccw++;
2655                 fmt_buffer++;
2656         }
2657
2658         cqr->startdev = startdev;
2659         cqr->memdev = startdev;
2660         cqr->basedev = base;
2661         cqr->retries = DASD_RETRIES;
2662         cqr->expires = startdev->default_expires * HZ;
2663         cqr->buildclk = get_tod_clock();
2664         cqr->status = DASD_CQR_FILLED;
2665         /* Set flags to suppress output for expected errors */
2666         set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2667
2668         return cqr;
2669 }
2670
2671 static struct dasd_ccw_req *
2672 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2673                        struct format_data_t *fdata, int enable_pav)
2674 {
2675         struct dasd_eckd_private *base_priv;
2676         struct dasd_eckd_private *start_priv;
2677         struct dasd_ccw_req *fcp;
2678         struct eckd_count *ect;
2679         struct ch_t address;
2680         struct ccw1 *ccw;
2681         void *data;
2682         int rpt;
2683         int cplength, datasize;
2684         int i, j;
2685         int intensity = 0;
2686         int r0_perm;
2687         int nr_tracks;
2688         int use_prefix;
2689
2690         if (enable_pav)
2691                 startdev = dasd_alias_get_start_dev(base);
2692
2693         if (!startdev)
2694                 startdev = base;
2695
2696         start_priv = startdev->private;
2697         base_priv = base->private;
2698
2699         rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2700
2701         nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2702
2703         /*
2704          * fdata->intensity is a bit string that tells us what to do:
2705          *   Bit 0: write record zero
2706          *   Bit 1: write home address, currently not supported
2707          *   Bit 2: invalidate tracks
2708          *   Bit 3: use OS/390 compatible disk layout (cdl)
2709          *   Bit 4: do not allow storage subsystem to modify record zero
2710          * Only some bit combinations make sense.
2711          */
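             /*
              * Worked example (illustrative): fdata->intensity == 0x1c asks
              * for track invalidation (0x04) with the compatible disk layout
              * (0x08) while record zero must not be modified (0x10); the code
              * below strips the 0x10 bit into r0_perm and handles the
              * remaining 0x0c in the switch statements.
              */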
2712         if (fdata->intensity & 0x10) {
2713                 r0_perm = 0;
2714                 intensity = fdata->intensity & ~0x10;
2715         } else {
2716                 r0_perm = 1;
2717                 intensity = fdata->intensity;
2718         }
2719
2720         use_prefix = base_priv->features.feature[8] & 0x01;
2721
2722         switch (intensity) {
2723         case 0x00:      /* Normal format */
2724         case 0x08:      /* Normal format, use cdl. */
2725                 cplength = 2 + (rpt*nr_tracks);
2726                 if (use_prefix)
2727                         datasize = sizeof(struct PFX_eckd_data) +
2728                                 sizeof(struct LO_eckd_data) +
2729                                 rpt * nr_tracks * sizeof(struct eckd_count);
2730                 else
2731                         datasize = sizeof(struct DE_eckd_data) +
2732                                 sizeof(struct LO_eckd_data) +
2733                                 rpt * nr_tracks * sizeof(struct eckd_count);
2734                 break;
2735         case 0x01:      /* Write record zero and format track. */
2736         case 0x09:      /* Write record zero and format track, use cdl. */
2737                 cplength = 2 + rpt * nr_tracks;
2738                 if (use_prefix)
2739                         datasize = sizeof(struct PFX_eckd_data) +
2740                                 sizeof(struct LO_eckd_data) +
2741                                 sizeof(struct eckd_count) +
2742                                 rpt * nr_tracks * sizeof(struct eckd_count);
2743                 else
2744                         datasize = sizeof(struct DE_eckd_data) +
2745                                 sizeof(struct LO_eckd_data) +
2746                                 sizeof(struct eckd_count) +
2747                                 rpt * nr_tracks * sizeof(struct eckd_count);
2748                 break;
2749         case 0x04:      /* Invalidate track. */
2750         case 0x0c:      /* Invalidate track, use cdl. */
2751                 cplength = 3;
2752                 if (use_prefix)
2753                         datasize = sizeof(struct PFX_eckd_data) +
2754                                 sizeof(struct LO_eckd_data) +
2755                                 sizeof(struct eckd_count);
2756                 else
2757                         datasize = sizeof(struct DE_eckd_data) +
2758                                 sizeof(struct LO_eckd_data) +
2759                                 sizeof(struct eckd_count);
2760                 break;
2761         default:
2762                 dev_warn(&startdev->cdev->dev,
2763                          "An I/O control call used incorrect flags 0x%x\n",
2764                          fdata->intensity);
2765                 return ERR_PTR(-EINVAL);
2766         }
2767
2768         fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2769         if (IS_ERR(fcp))
2770                 return fcp;
2771
2772         start_priv->count++;
2773         data = fcp->data;
2774         ccw = fcp->cpaddr;
2775
2776         switch (intensity & ~0x08) {
2777         case 0x00: /* Normal format. */
2778                 if (use_prefix) {
2779                         prefix(ccw++, (struct PFX_eckd_data *) data,
2780                                fdata->start_unit, fdata->stop_unit,
2781                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2782                         /* grant subsystem permission to format R0 */
2783                         if (r0_perm)
2784                                 ((struct PFX_eckd_data *)data)
2785                                         ->define_extent.ga_extended |= 0x04;
2786                         data += sizeof(struct PFX_eckd_data);
2787                 } else {
2788                         define_extent(ccw++, (struct DE_eckd_data *) data,
2789                                       fdata->start_unit, fdata->stop_unit,
2790                                       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2791                         /* grant subsystem permission to format R0 */
2792                         if (r0_perm)
2793                                 ((struct DE_eckd_data *) data)
2794                                         ->ga_extended |= 0x04;
2795                         data += sizeof(struct DE_eckd_data);
2796                 }
2797                 ccw[-1].flags |= CCW_FLAG_CC;
2798                 locate_record(ccw++, (struct LO_eckd_data *) data,
2799                               fdata->start_unit, 0, rpt*nr_tracks,
2800                               DASD_ECKD_CCW_WRITE_CKD, base,
2801                               fdata->blksize);
2802                 data += sizeof(struct LO_eckd_data);
2803                 break;
2804         case 0x01: /* Write record zero + format track. */
2805                 if (use_prefix) {
2806                         prefix(ccw++, (struct PFX_eckd_data *) data,
2807                                fdata->start_unit, fdata->stop_unit,
2808                                DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2809                                base, startdev);
2810                         data += sizeof(struct PFX_eckd_data);
2811                 } else {
2812                         define_extent(ccw++, (struct DE_eckd_data *) data,
2813                                fdata->start_unit, fdata->stop_unit,
2814                                DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2815                         data += sizeof(struct DE_eckd_data);
2816                 }
2817                 ccw[-1].flags |= CCW_FLAG_CC;
2818                 locate_record(ccw++, (struct LO_eckd_data *) data,
2819                               fdata->start_unit, 0, rpt * nr_tracks + 1,
2820                               DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2821                               base->block->bp_block);
2822                 data += sizeof(struct LO_eckd_data);
2823                 break;
2824         case 0x04: /* Invalidate track. */
2825                 if (use_prefix) {
2826                         prefix(ccw++, (struct PFX_eckd_data *) data,
2827                                fdata->start_unit, fdata->stop_unit,
2828                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2829                         data += sizeof(struct PFX_eckd_data);
2830                 } else {
2831                         define_extent(ccw++, (struct DE_eckd_data *) data,
2832                                fdata->start_unit, fdata->stop_unit,
2833                                DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2834                         data += sizeof(struct DE_eckd_data);
2835                 }
2836                 ccw[-1].flags |= CCW_FLAG_CC;
2837                 locate_record(ccw++, (struct LO_eckd_data *) data,
2838                               fdata->start_unit, 0, 1,
2839                               DASD_ECKD_CCW_WRITE_CKD, base, 8);
2840                 data += sizeof(struct LO_eckd_data);
2841                 break;
2842         }
2843
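             /*
              * For each track in the range, fill in the count area data and
              * chain the corresponding write CCWs: an optional record zero,
              * then either a single invalidation record or one record per
              * block on the track.
              */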
2844         for (j = 0; j < nr_tracks; j++) {
2845                 /* calculate cylinder and head for the current track */
2846                 set_ch_t(&address,
2847                          (fdata->start_unit + j) /
2848                          base_priv->rdc_data.trk_per_cyl,
2849                          (fdata->start_unit + j) %
2850                          base_priv->rdc_data.trk_per_cyl);
2851                 if (intensity & 0x01) { /* write record zero */
2852                         ect = (struct eckd_count *) data;
2853                         data += sizeof(struct eckd_count);
2854                         ect->cyl = address.cyl;
2855                         ect->head = address.head;
2856                         ect->record = 0;
2857                         ect->kl = 0;
2858                         ect->dl = 8;
2859                         ccw[-1].flags |= CCW_FLAG_CC;
2860                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2861                         ccw->flags = CCW_FLAG_SLI;
2862                         ccw->count = 8;
2863                         ccw->cda = (__u32)virt_to_phys(ect);
2864                         ccw++;
2865                 }
2866                 if ((intensity & ~0x08) & 0x04) {       /* erase track */
2867                         ect = (struct eckd_count *) data;
2868                         data += sizeof(struct eckd_count);
2869                         ect->cyl = address.cyl;
2870                         ect->head = address.head;
2871                         ect->record = 1;
2872                         ect->kl = 0;
2873                         ect->dl = 0;
2874                         ccw[-1].flags |= CCW_FLAG_CC;
2875                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2876                         ccw->flags = CCW_FLAG_SLI;
2877                         ccw->count = 8;
2878                         ccw->cda = (__u32)virt_to_phys(ect);
2879                 } else {                /* write remaining records */
2880                         for (i = 0; i < rpt; i++) {
2881                                 ect = (struct eckd_count *) data;
2882                                 data += sizeof(struct eckd_count);
2883                                 ect->cyl = address.cyl;
2884                                 ect->head = address.head;
2885                                 ect->record = i + 1;
2886                                 ect->kl = 0;
2887                                 ect->dl = fdata->blksize;
2888                                 /*
2889                                  * Check for special tracks 0-1
2890                                  * when formatting CDL
2891                                  */
2892                                 if ((intensity & 0x08) &&
2893                                     address.cyl == 0 && address.head == 0) {
2894                                         if (i < 3) {
2895                                                 ect->kl = 4;
2896                                                 ect->dl = sizes_trk0[i] - 4;
2897                                         }
2898                                 }
2899                                 if ((intensity & 0x08) &&
2900                                     address.cyl == 0 && address.head == 1) {
2901                                         ect->kl = 44;
2902                                         ect->dl = LABEL_SIZE - 44;
2903                                 }
2904                                 ccw[-1].flags |= CCW_FLAG_CC;
2905                                 if (i != 0 || j == 0)
2906                                         ccw->cmd_code =
2907                                                 DASD_ECKD_CCW_WRITE_CKD;
2908                                 else
2909                                         ccw->cmd_code =
2910                                                 DASD_ECKD_CCW_WRITE_CKD_MT;
2911                                 ccw->flags = CCW_FLAG_SLI;
2912                                 ccw->count = 8;
2913                                 ccw->cda = (__u32)virt_to_phys(ect);
2914                                 ccw++;
2915                         }
2916                 }
2917         }
2918
2919         fcp->startdev = startdev;
2920         fcp->memdev = startdev;
2921         fcp->basedev = base;
2922         fcp->retries = 256;
2923         fcp->expires = startdev->default_expires * HZ;
2924         fcp->buildclk = get_tod_clock();
2925         fcp->status = DASD_CQR_FILLED;
2926
2927         return fcp;
2928 }
2929
2930 /*
2931  * Wrapper function to build a CCW request depending on input data
2932  */
2933 static struct dasd_ccw_req *
2934 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2935                                struct format_data_t *fdata, int enable_pav,
2936                                int tpm, struct eckd_count *fmt_buffer, int rpt)
2937 {
2938         struct dasd_ccw_req *ccw_req;
2939
2940         if (!fmt_buffer) {
2941                 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2942         } else {
2943                 if (tpm)
2944                         ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2945                                                             enable_pav,
2946                                                             fmt_buffer, rpt);
2947                 else
2948                         ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2949                                                         fmt_buffer, rpt);
2950         }
2951
2952         return ccw_req;
2953 }
2954
2955 /*
2956  * Sanity checks on format_data
2957  */
2958 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2959                                           struct format_data_t *fdata)
2960 {
2961         struct dasd_eckd_private *private = base->private;
2962
2963         if (fdata->start_unit >=
2964             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2965                 dev_warn(&base->cdev->dev,
2966                          "Start track number %u used in formatting is too big\n",
2967                          fdata->start_unit);
2968                 return -EINVAL;
2969         }
2970         if (fdata->stop_unit >=
2971             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2972                 dev_warn(&base->cdev->dev,
2973                          "Stop track number %u used in formatting is too big\n",
2974                          fdata->stop_unit);
2975                 return -EINVAL;
2976         }
2977         if (fdata->start_unit > fdata->stop_unit) {
2978                 dev_warn(&base->cdev->dev,
2979                          "Start track %u used in formatting exceeds end track\n",
2980                          fdata->start_unit);
2981                 return -EINVAL;
2982         }
2983         if (dasd_check_blocksize(fdata->blksize) != 0) {
2984                 dev_warn(&base->cdev->dev,
2985                          "The DASD cannot be formatted with block size %u\n",
2986                          fdata->blksize);
2987                 return -EINVAL;
2988         }
2989         return 0;
2990 }
2991
2992 /*
2993  * This function will process format_data originally coming from an IOCTL
2994  */
2995 static int dasd_eckd_format_process_data(struct dasd_device *base,
2996                                          struct format_data_t *fdata,
2997                                          int enable_pav, int tpm,
2998                                          struct eckd_count *fmt_buffer, int rpt,
2999                                          struct irb *irb)
3000 {
3001         struct dasd_eckd_private *private = base->private;
3002         struct dasd_ccw_req *cqr, *n;
3003         struct list_head format_queue;
3004         struct dasd_device *device;
3005         char *sense = NULL;
3006         int old_start, old_stop, format_step;
3007         int step, retry;
3008         int rc;
3009
3010         rc = dasd_eckd_format_sanity_checks(base, fdata);
3011         if (rc)
3012                 return rc;
3013
3014         INIT_LIST_HEAD(&format_queue);
3015
3016         old_start = fdata->start_unit;
3017         old_stop = fdata->stop_unit;
3018
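             /*
              * Determine how many tracks a single CQR may cover: one track for
              * a command-mode format check, otherwise as many as fit into
              * DASD_CQR_MAX_CCW channel command words.
              */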
3019         if (!tpm && fmt_buffer != NULL) {
3020                 /* Command Mode / Format Check */
3021                 format_step = 1;
3022         } else if (tpm && fmt_buffer != NULL) {
3023                 /* Transport Mode / Format Check */
3024                 format_step = DASD_CQR_MAX_CCW / rpt;
3025         } else {
3026                 /* Normal Formatting */
3027                 format_step = DASD_CQR_MAX_CCW /
3028                         recs_per_track(&private->rdc_data, 0, fdata->blksize);
3029         }
3030
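             /*
              * Build and queue one CQR per chunk of format_step tracks, run the
              * queue, and retry if requests could not be built due to memory
              * pressure or if a format check hit an expected error.
              */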
3031         do {
3032                 retry = 0;
3033                 while (fdata->start_unit <= old_stop) {
3034                         step = fdata->stop_unit - fdata->start_unit + 1;
3035                         if (step > format_step) {
3036                                 fdata->stop_unit =
3037                                         fdata->start_unit + format_step - 1;
3038                         }
3039
3040                         cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3041                                                              enable_pav, tpm,
3042                                                              fmt_buffer, rpt);
3043                         if (IS_ERR(cqr)) {
3044                                 rc = PTR_ERR(cqr);
3045                                 if (rc == -ENOMEM) {
3046                                         if (list_empty(&format_queue))
3047                                                 goto out;
3048                                         /*
3049                                          * not enough memory available; run the
3050                                          * requests built so far and retry the
3051                                          * remainder once they have finished
3052                                          */
3053                                         retry = 1;
3054                                         break;
3055                                 }
3056                                 goto out_err;
3057                         }
3058                         list_add_tail(&cqr->blocklist, &format_queue);
3059
3060                         if (fmt_buffer) {
3061                                 step = fdata->stop_unit - fdata->start_unit + 1;
3062                                 fmt_buffer += rpt * step;
3063                         }
3064                         fdata->start_unit = fdata->stop_unit + 1;
3065                         fdata->stop_unit = old_stop;
3066                 }
3067
3068                 rc = dasd_sleep_on_queue(&format_queue);
3069
3070 out_err:
3071                 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3072                         device = cqr->startdev;
3073                         private = device->private;
3074
3075                         if (cqr->status == DASD_CQR_FAILED) {
3076                                 /*
3077                                  * Only get sense data if called by format
3078                                  * check
3079                                  */
3080                                 if (fmt_buffer && irb) {
3081                                         sense = dasd_get_sense(&cqr->irb);
3082                                         memcpy(irb, &cqr->irb, sizeof(*irb));
3083                                 }
3084                                 rc = -EIO;
3085                         }
3086                         list_del_init(&cqr->blocklist);
3087                         dasd_ffree_request(cqr, device);
3088                         private->count--;
3089                 }
3090
3091                 if (rc && rc != -EIO)
3092                         goto out;
3093                 if (rc == -EIO) {
3094                         /*
3095                          * In case fewer than the expected records are on the
3096                          * track, we will most likely get a 'No Record Found'
3097                          * error (in command mode) or a 'File Protected' error
3098                          * (in transport mode). Those particular cases shouldn't
3099                          * pass the -EIO to the IOCTL, therefore reset the rc
3100                          * and continue.
3101                          */
3102                         if (sense &&
3103                             (sense[1] & SNS1_NO_REC_FOUND ||
3104                              sense[1] & SNS1_FILE_PROTECTED))
3105                                 retry = 1;
3106                         else
3107                                 goto out;
3108                 }
3109
3110         } while (retry);
3111
3112 out:
3113         fdata->start_unit = old_start;
3114         fdata->stop_unit = old_stop;
3115
3116         return rc;
3117 }
3118
3119 static int dasd_eckd_format_device(struct dasd_device *base,
3120                                    struct format_data_t *fdata, int enable_pav)
3121 {
3122         return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3123                                              0, NULL);
3124 }
3125
3126 static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3127                                       struct dasd_ccw_req *cqr)
3128 {
3129         struct dasd_block *block = cqr->block;
3130         struct dasd_format_entry *format;
3131         unsigned long flags;
3132         bool rc = false;
3133
3134         spin_lock_irqsave(&block->format_lock, flags);
3135         if (cqr->trkcount != atomic_read(&block->trkcount)) {
3136                 /*
3137                  * The number of formatted tracks has changed after request
3138                  * start and we cannot tell whether the current track was involved.
3139                  * To avoid data corruption, treat it as if the current track is
3140                  * involved.
3141                  */
3142                 rc = true;
3143                 goto out;
3144         }
3145         list_for_each_entry(format, &block->format_list, list) {
3146                 if (format->track == to_format->track) {
3147                         rc = true;
3148                         goto out;
3149                 }
3150         }
3151         list_add_tail(&to_format->list, &block->format_list);
3152
3153 out:
3154         spin_unlock_irqrestore(&block->format_lock, flags);
3155         return rc;
3156 }
3157
3158 static void clear_format_track(struct dasd_format_entry *format,
3159                               struct dasd_block *block)
3160 {
3161         unsigned long flags;
3162
3163         spin_lock_irqsave(&block->format_lock, flags);
3164         atomic_inc(&block->trkcount);
3165         list_del_init(&format->list);
3166         spin_unlock_irqrestore(&block->format_lock, flags);
3167 }
3168
3169 /*
3170  * Callback function to free ESE format requests.
3171  */
3172 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3173 {
3174         struct dasd_device *device = cqr->startdev;
3175         struct dasd_eckd_private *private = device->private;
3176         struct dasd_format_entry *format = data;
3177
3178         clear_format_track(format, cqr->basedev->block);
3179         private->count--;
3180         dasd_ffree_request(cqr, device);
3181 }
3182
3183 static struct dasd_ccw_req *
3184 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3185                      struct irb *irb)
3186 {
3187         struct dasd_eckd_private *private;
3188         struct dasd_format_entry *format;
3189         struct format_data_t fdata;
3190         unsigned int recs_per_trk;
3191         struct dasd_ccw_req *fcqr;
3192         struct dasd_device *base;
3193         struct dasd_block *block;
3194         unsigned int blksize;
3195         struct request *req;
3196         sector_t first_trk;
3197         sector_t last_trk;
3198         sector_t curr_trk;
3199         int rc;
3200
3201         req = dasd_get_callback_data(cqr);
3202         block = cqr->block;
3203         base = block->base;
3204         private = base->private;
3205         blksize = block->bp_block;
3206         recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3207         format = &startdev->format_entry;
3208
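             /* Determine the track range addressed by the original request. */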
3209         first_trk = blk_rq_pos(req) >> block->s2b_shift;
3210         sector_div(first_trk, recs_per_trk);
3211         last_trk =
3212                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3213         sector_div(last_trk, recs_per_trk);
3214         rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3215         if (rc)
3216                 return ERR_PTR(rc);
3217
3218         if (curr_trk < first_trk || curr_trk > last_trk) {
3219                 DBF_DEV_EVENT(DBF_WARNING, startdev,
3220                               "ESE error track %llu not within range %llu - %llu\n",
3221                               curr_trk, first_trk, last_trk);
3222                 return ERR_PTR(-EINVAL);
3223         }
3224         format->track = curr_trk;
3225         /* test whether the track is already being formatted by another thread */
3226         if (test_and_set_format_track(format, cqr)) {
3227                 /* this is not a real error, so do not consume a retry */
3228                 cqr->retries++;
3229                 return ERR_PTR(-EEXIST);
3230         }
3231
3232         fdata.start_unit = curr_trk;
3233         fdata.stop_unit = curr_trk;
3234         fdata.blksize = blksize;
3235         fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3236
3237         rc = dasd_eckd_format_sanity_checks(base, &fdata);
3238         if (rc)
3239                 return ERR_PTR(-EINVAL);
3240
3241         /*
3242          * We're building the request with PAV disabled as we're reusing
3243          * the former startdev.
3244          */
3245         fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3246         if (IS_ERR(fcqr))
3247                 return fcqr;
3248
3249         fcqr->callback = dasd_eckd_ese_format_cb;
3250         fcqr->callback_data = (void *) format;
3251
3252         return fcqr;
3253 }
3254
3255 /*
3256  * When data is read from an unformatted area of an ESE volume, this function
3257  * returns zeroed data and thereby mimics a read of zero data.
3258  *
3259  * The first unformatted track is the one that got the NRF error, the address is
3260  * encoded in the sense data.
3261  *
3262  * All tracks before have returned valid data and should not be touched.
3263  * All tracks after the unformatted track might be formatted or not. This is
3264  * currently not known, remember the processed data and return the remainder of
3265  * the request to the blocklayer in __dasd_cleanup_cqr().
3266  */
3267 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3268 {
3269         struct dasd_eckd_private *private;
3270         sector_t first_trk, last_trk;
3271         sector_t first_blk, last_blk;
3272         unsigned int blksize, off;
3273         unsigned int recs_per_trk;
3274         struct dasd_device *base;
3275         struct req_iterator iter;
3276         struct dasd_block *block;
3277         unsigned int skip_block;
3278         unsigned int blk_count;
3279         struct request *req;
3280         struct bio_vec bv;
3281         sector_t curr_trk;
3282         sector_t end_blk;
3283         char *dst;
3284         int rc;
3285
3286         req = (struct request *) cqr->callback_data;
3287         base = cqr->block->base;
3288         blksize = base->block->bp_block;
3289         block = cqr->block;
3290         private = base->private;
3291         skip_block = 0;
3292         blk_count = 0;
3293
3294         recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3295         first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
3296         sector_div(first_trk, recs_per_trk);
3297         last_trk = last_blk =
3298                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3299         sector_div(last_trk, recs_per_trk);
3300         rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3301         if (rc)
3302                 return rc;
3303
3304         /* sanity check if the current track from sense data is valid */
3305         if (curr_trk < first_trk || curr_trk > last_trk) {
3306                 DBF_DEV_EVENT(DBF_WARNING, base,
3307                               "ESE error track %llu not within range %llu - %llu\n",
3308                               curr_trk, first_trk, last_trk);
3309                 return -EINVAL;
3310         }
3311
3312         /*
3313          * if the NRF error did not occur on the first track, skip over the
3314          * blocks that were read successfully
3315          */
3316         if (curr_trk != first_trk)
3317                 skip_block = curr_trk * recs_per_trk - first_blk;
3318
3319         /* we have no information beyond the current track */
3320         end_blk = (curr_trk + 1) * recs_per_trk;
3321
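             /*
              * Skip the blocks that were read successfully, zero the buffers
              * for blocks on the unformatted track, and stop at the end of
              * that track, recording the number of bytes processed so far.
              */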
3322         rq_for_each_segment(bv, req, iter) {
3323                 dst = bvec_virt(&bv);
3324                 for (off = 0; off < bv.bv_len; off += blksize) {
3325                         if (first_blk + blk_count >= end_blk) {
3326                                 cqr->proc_bytes = blk_count * blksize;
3327                                 return 0;
3328                         }
3329                         if (dst && !skip_block)
3330                                 memset(dst, 0, blksize);
3331                         else
3332                                 skip_block--;
3333                         dst += blksize;
3334                         blk_count++;
3335                 }
3336         }
3337         return 0;
3338 }
3339
3340 /*
3341  * Helper function to count consecutive records of a single track.
3342  */
3343 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3344                                    int max)
3345 {
3346         int head;
3347         int i;
3348
3349         head = fmt_buffer[start].head;
3350
3351         /*
3352          * There are 3 conditions where we stop counting:
3353          * - if data reoccurs (same head, record numbering starting over at 1), which may
3354          *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
3355          * - when the head changes, because we're iterating over several tracks
3356          *   then (DASD_ECKD_CCW_READ_COUNT_MT)
3357          * - when we've reached the end of sensible data in the buffer (the
3358          *   record will be 0 then)
3359          */
3360         for (i = start; i < max; i++) {
3361                 if (i > start) {
3362                         if ((fmt_buffer[i].head == head &&
3363                             fmt_buffer[i].record == 1) ||
3364                             fmt_buffer[i].head != head ||
3365                             fmt_buffer[i].record == 0)
3366                                 break;
3367                 }
3368         }
3369
3370         return i - start;
3371 }
3372
3373 /*
3374  * Evaluate a given range of tracks. Data like number of records, blocksize,
3375  * record ids, and key length are compared with expected data.
3376  *
3377  * If a mismatch occurs, the corresponding error bit is set, as well as
3378  * additional information, depending on the error.
3379  */
3380 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3381                                              struct format_check_t *cdata,
3382                                              int rpt_max, int rpt_exp,
3383                                              int trk_per_cyl, int tpm)
3384 {
3385         struct ch_t geo;
3386         int max_entries;
3387         int count = 0;
3388         int trkcount;
3389         int blksize;
3390         int pos = 0;
3391         int i, j;
3392         int kl;
3393
3394         trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3395         max_entries = trkcount * rpt_max;
3396
3397         for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3398                 /* Calculate the correct next starting position in the buffer */
3399                 if (tpm) {
3400                         while (fmt_buffer[pos].record == 0 &&
3401                                fmt_buffer[pos].dl == 0) {
3402                                 if (pos++ > max_entries)
3403                                         break;
3404                         }
3405                 } else {
3406                         if (i != cdata->expect.start_unit)
3407                                 pos += rpt_max - count;
3408                 }
3409
3410                 /* Calculate the expected geo values for the current track */
3411                 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3412
3413                 /* Count and check number of records */
3414                 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3415
3416                 if (count < rpt_exp) {
3417                         cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3418                         break;
3419                 }
3420                 if (count > rpt_exp) {
3421                         cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3422                         break;
3423                 }
3424
3425                 for (j = 0; j < count; j++, pos++) {
3426                         blksize = cdata->expect.blksize;
3427                         kl = 0;
3428
3429                         /*
3430                          * Set special values when checking CDL formatted
3431                          * devices.
3432                          */
3433                         if ((cdata->expect.intensity & 0x08) &&
3434                             geo.cyl == 0 && geo.head == 0) {
3435                                 if (j < 3) {
3436                                         blksize = sizes_trk0[j] - 4;
3437                                         kl = 4;
3438                                 }
3439                         }
3440                         if ((cdata->expect.intensity & 0x08) &&
3441                             geo.cyl == 0 && geo.head == 1) {
3442                                 blksize = LABEL_SIZE - 44;
3443                                 kl = 44;
3444                         }
3445
3446                         /* Check blocksize */
3447                         if (fmt_buffer[pos].dl != blksize) {
3448                                 cdata->result = DASD_FMT_ERR_BLKSIZE;
3449                                 goto out;
3450                         }
3451                         /* Check if the key length matches the expected value */
3452                         if (fmt_buffer[pos].kl != kl) {
3453                                 cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3454                                 goto out;
3455                         }
3456                         /* Check if record_id is correct */
3457                         if (fmt_buffer[pos].cyl != geo.cyl ||
3458                             fmt_buffer[pos].head != geo.head ||
3459                             fmt_buffer[pos].record != (j + 1)) {
3460                                 cdata->result = DASD_FMT_ERR_RECORD_ID;
3461                                 goto out;
3462                         }
3463                 }
3464         }
3465
3466 out:
3467         /*
3468          * In case of no errors, we need to decrease by one
3469          * to get the correct positions.
3470          */
3471         if (!cdata->result) {
3472                 i--;
3473                 pos--;
3474         }
3475
3476         cdata->unit = i;
3477         cdata->num_records = count;
3478         cdata->rec = fmt_buffer[pos].record;
3479         cdata->blksize = fmt_buffer[pos].dl;
3480         cdata->key_length = fmt_buffer[pos].kl;
3481 }
3482
3483 /*
3484  * Check the format of a range of tracks of a DASD.
3485  */
3486 static int dasd_eckd_check_device_format(struct dasd_device *base,
3487                                          struct format_check_t *cdata,
3488                                          int enable_pav)
3489 {
3490         struct dasd_eckd_private *private = base->private;
3491         struct eckd_count *fmt_buffer;
3492         struct irb irb;
3493         int rpt_max, rpt_exp;
3494         int fmt_buffer_size;
3495         int trk_per_cyl;
3496         int trkcount;
3497         int tpm = 0;
3498         int rc;
3499
3500         trk_per_cyl = private->rdc_data.trk_per_cyl;
3501
3502         /* Get maximum and expected amount of records per track */
3503         rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3504         rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3505
3506         trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3507         fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3508
3509         fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3510         if (!fmt_buffer)
3511                 return -ENOMEM;
3512
3513         /*
3514          * A certain FICON feature subset is needed to operate in transport
3515          * mode. Additionally, the support for transport mode is implicitly
3516          * checked by comparing the buffer size with fcx_max_data. As long as
3517          * the buffer size is smaller we can operate in transport mode and
3518          * process multiple tracks. If not, only one track at once is being
3519          * processed using command mode.
3520          */
3521         if ((private->features.feature[40] & 0x04) &&
3522             fmt_buffer_size <= private->fcx_max_data)
3523                 tpm = 1;
3524
3525         rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3526                                            tpm, fmt_buffer, rpt_max, &irb);
3527         if (rc && rc != -EIO)
3528                 goto out;
3529         if (rc == -EIO) {
3530                 /*
3531                  * If our first attempt with transport mode enabled comes back
3532                  * with an incorrect length error, we're going to retry the
3533                  * check with command mode.
3534                  */
3535                 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3536                         tpm = 0;
3537                         rc = dasd_eckd_format_process_data(base, &cdata->expect,
3538                                                            enable_pav, tpm,
3539                                                            fmt_buffer, rpt_max,
3540                                                            &irb);
3541                         if (rc)
3542                                 goto out;
3543                 } else {
3544                         goto out;
3545                 }
3546         }
3547
3548         dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3549                                          trk_per_cyl, tpm);
3550
3551 out:
3552         kfree(fmt_buffer);
3553
3554         return rc;
3555 }
3556
3557 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3558 {
3559         if (cqr->retries < 0) {
3560                 cqr->status = DASD_CQR_FAILED;
3561                 return;
3562         }
3563         cqr->status = DASD_CQR_FILLED;
3564         if (cqr->block && (cqr->startdev != cqr->block->base)) {
3565                 dasd_eckd_reset_ccw_to_base_io(cqr);
3566                 cqr->startdev = cqr->block->base;
3567                 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3568         }
3569 }
3570
3571 static dasd_erp_fn_t
3572 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3573 {
3574         struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3575         struct ccw_device *cdev = device->cdev;
3576
3577         switch (cdev->id.cu_type) {
3578         case 0x3990:
3579         case 0x2105:
3580         case 0x2107:
3581         case 0x1750:
3582                 return dasd_3990_erp_action;
3583         case 0x9343:
3584         case 0x3880:
3585         default:
3586                 return dasd_default_erp_action;
3587         }
3588 }
3589
3590 static dasd_erp_fn_t
3591 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3592 {
3593         return dasd_default_erp_postaction;
3594 }
3595
3596 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3597                                               struct dasd_ccw_req *cqr,
3598                                               struct irb *irb)
3599 {
3600         char mask;
3601         char *sense = NULL;
3602         struct dasd_eckd_private *private = device->private;
3603
3604         /* first of all check for state change pending interrupt */
3605         mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3606         if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3607                 /*
3608                  * for alias only, not in offline processing
3609                  * and only if not suspended
3610                  */
3611                 if (!device->block && private->lcu &&
3612                     device->state == DASD_STATE_ONLINE &&
3613                     !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3614                     !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3615                         /* schedule worker to reload device */
3616                         dasd_reload_device(device);
3617                 }
3618                 dasd_generic_handle_state_change(device);
3619                 return;
3620         }
3621
3622         sense = dasd_get_sense(irb);
3623         if (!sense)
3624                 return;
3625
3626         /* summary unit check */
3627         if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3628             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3629                 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3630                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3631                                       "eckd suc: device already notified");
3632                         return;
3633                 }
3634                 sense = dasd_get_sense(irb);
3635                 if (!sense) {
3636                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3637                                       "eckd suc: no reason code available");
3638                         clear_bit(DASD_FLAG_SUC, &device->flags);
3639                         return;
3640
3641                 }
3642                 private->suc_reason = sense[8];
3643                 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3644                               "eckd handle summary unit check: reason",
3645                               private->suc_reason);
3646                 dasd_get_device(device);
3647                 if (!schedule_work(&device->suc_work))
3648                         dasd_put_device(device);
3649
3650                 return;
3651         }
3652
3653         /* service information message SIM */
3654         if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3655             ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3656                 dasd_3990_erp_handle_sim(device, sense);
3657                 return;
3658         }
3659
3660         /* loss of device reservation is handled via base devices only
3661          * as alias devices may be used with several bases
3662          */
3663         if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3664             (sense[7] == 0x3F) &&
3665             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3666             test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3667                 if (device->features & DASD_FEATURE_FAILONSLCK)
3668                         set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3669                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3670                 dev_err(&device->cdev->dev,
3671                         "The device reservation was lost\n");
3672         }
3673 }
3674
3675 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3676                                        unsigned int first_trk,
3677                                        unsigned int last_trk)
3678 {
3679         struct dasd_eckd_private *private = device->private;
3680         unsigned int trks_per_vol;
3681         int rc = 0;
3682
3683         trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3684
3685         if (first_trk >= trks_per_vol) {
3686                 dev_warn(&device->cdev->dev,
3687                          "Start track number %u used in the space release command is too big\n",
3688                          first_trk);
3689                 rc = -EINVAL;
3690         } else if (last_trk >= trks_per_vol) {
3691                 dev_warn(&device->cdev->dev,
3692                          "Stop track number %u used in the space release command is too big\n",
3693                          last_trk);
3694                 rc = -EINVAL;
3695         } else if (first_trk > last_trk) {
3696                 dev_warn(&device->cdev->dev,
3697                          "Start track %u used in the space release command exceeds the end track\n",
3698                          first_trk);
3699                 rc = -EINVAL;
3700         }
3701         return rc;
3702 }
3703
3704 /*
3705  * Helper function to count the number of extents involved in a given track
3706  * range, taking extent alignment into account.
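      * For example, with 30 tracks per extent, the range 10..70 touches a
      * leading partial extent (10-29), one full extent (30-59), and a
      * trailing partial extent (60-70), i.e. three extents in total.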
3707  */
3708 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3709 {
3710         int cur_pos = 0;
3711         int count = 0;
3712         int tmp;
3713
3714         if (from == to)
3715                 return 1;
3716
3717         /* Count first partial extent */
3718         if (from % trks_per_ext != 0) {
3719                 tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3720                 if (tmp > to)
3721                         tmp = to;
3722                 cur_pos = tmp - from + 1;
3723                 count++;
3724         }
3725         /* Count full extents */
3726         if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3727                 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3728                 count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3729                 cur_pos = tmp;
3730         }
3731         /* Count last partial extent */
3732         if (cur_pos < to)
3733                 count++;
3734
3735         return count;
3736 }
3737
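     /*
      * Query the PPRC status and return the reported copy-relation state,
      * 0 if PPRC is not enabled, or a negative error code.
      */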
3738 static int dasd_in_copy_relation(struct dasd_device *device)
3739 {
3740         struct dasd_pprc_data_sc4 *temp;
3741         int rc;
3742
3743         if (!dasd_eckd_pprc_enabled(device))
3744                 return 0;
3745
3746         temp = kzalloc(sizeof(*temp), GFP_KERNEL);
3747         if (!temp)
3748                 return -ENOMEM;
3749
3750         rc = dasd_eckd_query_pprc_status(device, temp);
3751         if (!rc)
3752                 rc = temp->dev_info[0].state;
3753
3754         kfree(temp);
3755         return rc;
3756 }
3757
3758 /*
3759  * Release allocated space for a given range or an entire volume.
3760  */
3761 static struct dasd_ccw_req *
3762 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3763                   struct request *req, unsigned int first_trk,
3764                   unsigned int last_trk, int by_extent)
3765 {
3766         struct dasd_eckd_private *private = device->private;
3767         struct dasd_dso_ras_ext_range *ras_range;
3768         struct dasd_rssd_features *features;
3769         struct dasd_dso_ras_data *ras_data;
3770         u16 heads, beg_head, end_head;
3771         int cur_to_trk, cur_from_trk;
3772         struct dasd_ccw_req *cqr;
3773         u32 beg_cyl, end_cyl;
3774         int copy_relation;
3775         struct ccw1 *ccw;
3776         int trks_per_ext;
3777         size_t ras_size;
3778         size_t size;
3779         int nr_exts;
3780         void *rq;
3781         int i;
3782
3783         if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3784                 return ERR_PTR(-EINVAL);
3785
3786         copy_relation = dasd_in_copy_relation(device);
3787         if (copy_relation < 0)
3788                 return ERR_PTR(copy_relation);
3789
3790         rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3791
3792         features = &private->features;
3793
3794         trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3795         nr_exts = 0;
3796         if (by_extent)
3797                 nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
3798         ras_size = sizeof(*ras_data);
3799         size = ras_size + (nr_exts * sizeof(*ras_range));
3800
3801         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3802         if (IS_ERR(cqr)) {
3803                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3804                                 "Could not allocate RAS request");
3805                 return cqr;
3806         }
3807
3808         ras_data = cqr->data;
3809         memset(ras_data, 0, size);
3810
3811         ras_data->order = DSO_ORDER_RAS;
3812         ras_data->flags.vol_type = 0; /* CKD volume */
3813         /* Release specified extents or entire volume */
3814         ras_data->op_flags.by_extent = by_extent;
3815         /*
3816          * This bit guarantees initialisation of tracks within an extent that is
3817          * not fully specified, but is only supported with a certain feature
3818          * subset and for devices not in a copy relation.
3819          */
3820         if (features->feature[56] & 0x01 && !copy_relation)
3821                 ras_data->op_flags.guarantee_init = 1;
3822
3823         ras_data->lss = private->conf.ned->ID;
3824         ras_data->dev_addr = private->conf.ned->unit_addr;
3825         ras_data->nr_exts = nr_exts;
3826
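             /*
              * For a by-extent release, describe each extent as a begin/end
              * cylinder/head pair, clipping the first and last extent to the
              * requested track range.
              */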
3827         if (by_extent) {
3828                 heads = private->rdc_data.trk_per_cyl;
3829                 cur_from_trk = first_trk;
3830                 cur_to_trk = first_trk + trks_per_ext -
3831                         (first_trk % trks_per_ext) - 1;
3832                 if (cur_to_trk > last_trk)
3833                         cur_to_trk = last_trk;
3834                 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3835
3836                 for (i = 0; i < nr_exts; i++) {
3837                         beg_cyl = cur_from_trk / heads;
3838                         beg_head = cur_from_trk % heads;
3839                         end_cyl = cur_to_trk / heads;
3840                         end_head = cur_to_trk % heads;
3841
3842                         set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3843                         set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3844
3845                         cur_from_trk = cur_to_trk + 1;
3846                         cur_to_trk = cur_from_trk + trks_per_ext - 1;
3847                         if (cur_to_trk > last_trk)
3848                                 cur_to_trk = last_trk;
3849                         ras_range++;
3850                 }
3851         }
3852
3853         ccw = cqr->cpaddr;
3854         ccw->cda = (__u32)virt_to_phys(cqr->data);
3855         ccw->cmd_code = DASD_ECKD_CCW_DSO;
3856         ccw->count = size;
3857
3858         cqr->startdev = device;
3859         cqr->memdev = device;
3860         cqr->block = block;
3861         cqr->retries = 256;
3862         cqr->expires = device->default_expires * HZ;
3863         cqr->buildclk = get_tod_clock();
3864         cqr->status = DASD_CQR_FILLED;
3865
3866         return cqr;
3867 }
3868
3869 static int dasd_eckd_release_space_full(struct dasd_device *device)
3870 {
3871         struct dasd_ccw_req *cqr;
3872         int rc;
3873
3874         cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3875         if (IS_ERR(cqr))
3876                 return PTR_ERR(cqr);
3877
3878         rc = dasd_sleep_on_interruptible(cqr);
3879
3880         dasd_sfree_request(cqr, cqr->memdev);
3881
3882         return rc;
3883 }
3884
3885 static int dasd_eckd_release_space_trks(struct dasd_device *device,
3886                                         unsigned int from, unsigned int to)
3887 {
3888         struct dasd_eckd_private *private = device->private;
3889         struct dasd_block *block = device->block;
3890         struct dasd_ccw_req *cqr, *n;
3891         struct list_head ras_queue;
3892         unsigned int device_exts;
3893         int trks_per_ext;
3894         int stop, step;
3895         int cur_pos;
3896         int rc = 0;
3897         int retry;
3898
3899         INIT_LIST_HEAD(&ras_queue);
3900
3901         device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3902         trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3903
3904         /* Make sure device limits are not exceeded */
3905         step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3906         cur_pos = from;
3907
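             /*
              * Issue the space release in chunks of at most
              * DASD_ECKD_RAS_EXTS_MAX extents per request; on memory shortage
              * run the queued requests first and retry the remainder.
              */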
3908         do {
3909                 retry = 0;
3910                 while (cur_pos < to) {
3911                         stop = cur_pos + step -
3912                                 ((cur_pos + step) % trks_per_ext) - 1;
3913                         if (stop > to)
3914                                 stop = to;
3915
3916                         cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3917                         if (IS_ERR(cqr)) {
3918                                 rc = PTR_ERR(cqr);
3919                                 if (rc == -ENOMEM) {
3920                                         if (list_empty(&ras_queue))
3921                                                 goto out;
3922                                         retry = 1;
3923                                         break;
3924                                 }
3925                                 goto err_out;
3926                         }
3927
3928                         spin_lock_irq(&block->queue_lock);
3929                         list_add_tail(&cqr->blocklist, &ras_queue);
3930                         spin_unlock_irq(&block->queue_lock);
3931                         cur_pos = stop + 1;
3932                 }
3933
3934                 rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3935
3936 err_out:
3937                 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3938                         device = cqr->startdev;
3939                         private = device->private;
3940
3941                         spin_lock_irq(&block->queue_lock);
3942                         list_del_init(&cqr->blocklist);
3943                         spin_unlock_irq(&block->queue_lock);
3944                         dasd_sfree_request(cqr, device);
3945                         private->count--;
3946                 }
3947         } while (retry);
3948
3949 out:
3950         return rc;
3951 }
3952
3953 static int dasd_eckd_release_space(struct dasd_device *device,
3954                                    struct format_data_t *rdata)
3955 {
3956         if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3957                 return dasd_eckd_release_space_full(device);
3958         else if (rdata->intensity == 0)
3959                 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3960                                                     rdata->stop_unit);
3961         else
3962                 return -EINVAL;
3963 }
3964
3965 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3966                                                struct dasd_device *startdev,
3967                                                struct dasd_block *block,
3968                                                struct request *req,
3969                                                sector_t first_rec,
3970                                                sector_t last_rec,
3971                                                sector_t first_trk,
3972                                                sector_t last_trk,
3973                                                unsigned int first_offs,
3974                                                unsigned int last_offs,
3975                                                unsigned int blk_per_trk,
3976                                                unsigned int blksize)
3977 {
3978         struct dasd_eckd_private *private;
3979         unsigned long *idaws;
3980         struct LO_eckd_data *LO_data;
3981         struct dasd_ccw_req *cqr;
3982         struct ccw1 *ccw;
3983         struct req_iterator iter;
3984         struct bio_vec bv;
3985         char *dst;
3986         unsigned int off;
3987         int count, cidaw, cplength, datasize;
3988         sector_t recid;
3989         unsigned char cmd, rcmd;
3990         int use_prefix;
3991         struct dasd_device *basedev;
3992
3993         basedev = block->base;
3994         private = basedev->private;
3995         if (rq_data_dir(req) == READ)
3996                 cmd = DASD_ECKD_CCW_READ_MT;
3997         else if (rq_data_dir(req) == WRITE)
3998                 cmd = DASD_ECKD_CCW_WRITE_MT;
3999         else
4000                 return ERR_PTR(-EINVAL);
4001
4002         /* Check struct bio and count the number of blocks for the request. */
4003         count = 0;
4004         cidaw = 0;
4005         rq_for_each_segment(bv, req, iter) {
4006                 if (bv.bv_len & (blksize - 1))
4007                         /* ECKD can only do full blocks. */
4008                         return ERR_PTR(-EINVAL);
4009                 count += bv.bv_len >> (block->s2b_shift + 9);
4010                 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
4011                         cidaw += bv.bv_len >> (block->s2b_shift + 9);
4012         }
4013         /* Paranoia. */
4014         if (count != last_rec - first_rec + 1)
4015                 return ERR_PTR(-EINVAL);
4016
4017         /* use the prefix command if available */
4018         use_prefix = private->features.feature[8] & 0x01;
4019         if (use_prefix) {
4020                 /* 1x prefix + number of blocks */
4021                 cplength = 2 + count;
4022                 /* 1x prefix + cidaws*sizeof(long) */
4023                 datasize = sizeof(struct PFX_eckd_data) +
4024                         sizeof(struct LO_eckd_data) +
4025                         cidaw * sizeof(unsigned long);
4026         } else {
4027                 /* 1x define extent + 1x locate record + number of blocks */
4028                 cplength = 2 + count;
4029                 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
4030                 datasize = sizeof(struct DE_eckd_data) +
4031                         sizeof(struct LO_eckd_data) +
4032                         cidaw * sizeof(unsigned long);
4033         }
4034         /* Find out the number of additional locate record ccws for cdl. */
4035         if (private->uses_cdl && first_rec < 2*blk_per_trk) {
4036                 if (last_rec >= 2*blk_per_trk)
4037                         count = 2*blk_per_trk - first_rec;
4038                 cplength += count;
4039                 datasize += count*sizeof(struct LO_eckd_data);
4040         }
4041         /* Allocate the ccw request. */
4042         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4043                                    startdev, blk_mq_rq_to_pdu(req));
4044         if (IS_ERR(cqr))
4045                 return cqr;
4046         ccw = cqr->cpaddr;
4047         /* First ccw is define extent or prefix. */
4048         if (use_prefix) {
4049                 if (prefix(ccw++, cqr->data, first_trk,
4050                            last_trk, cmd, basedev, startdev) == -EAGAIN) {
4051                         /* Clock not in sync and XRC is enabled.
4052                          * Try again later.
4053                          */
4054                         dasd_sfree_request(cqr, startdev);
4055                         return ERR_PTR(-EAGAIN);
4056                 }
4057                 idaws = (unsigned long *) (cqr->data +
4058                                            sizeof(struct PFX_eckd_data));
4059         } else {
4060                 if (define_extent(ccw++, cqr->data, first_trk,
4061                                   last_trk, cmd, basedev, 0) == -EAGAIN) {
4062                         /* Clock not in sync and XRC is enabled.
4063                          * Try again later.
4064                          */
4065                         dasd_sfree_request(cqr, startdev);
4066                         return ERR_PTR(-EAGAIN);
4067                 }
4068                 idaws = (unsigned long *) (cqr->data +
4069                                            sizeof(struct DE_eckd_data));
4070         }
4071         /* Build locate_record + read/write CCWs. */
4072         LO_data = (struct LO_eckd_data *) (idaws + cidaw);
4073         recid = first_rec;
4074         if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
4075                 /* Only standard blocks so there is just one locate record. */
4076                 ccw[-1].flags |= CCW_FLAG_CC;
4077                 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
4078                               last_rec - recid + 1, cmd, basedev, blksize);
4079         }
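             /*
              * Walk the request segments and emit one read/write CCW per
              * block, inserting additional locate record CCWs for the CDL
              * special blocks and using an IDA list whenever the data buffer
              * requires it.
              */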
4080         rq_for_each_segment(bv, req, iter) {
4081                 dst = bvec_virt(&bv);
4082                 if (dasd_page_cache) {
4083                         char *copy = kmem_cache_alloc(dasd_page_cache,
4084                                                       GFP_DMA | __GFP_NOWARN);
4085                         if (copy && rq_data_dir(req) == WRITE)
4086                                 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
4087                         if (copy)
4088                                 dst = copy + bv.bv_offset;
4089                 }
4090                 for (off = 0; off < bv.bv_len; off += blksize) {
4091                         sector_t trkid = recid;
4092                         unsigned int recoffs = sector_div(trkid, blk_per_trk);
4093                         rcmd = cmd;
4094                         count = blksize;
4095                         /* Locate record for cdl special block ? */
4096                         if (private->uses_cdl && recid < 2*blk_per_trk) {
4097                                 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
4098                                         rcmd |= 0x8;
4099                                         count = dasd_eckd_cdl_reclen(recid);
4100                                         if (count < blksize &&
4101                                             rq_data_dir(req) == READ)
4102                                                 memset(dst + count, 0xe5,
4103                                                        blksize - count);
4104                                 }
4105                                 ccw[-1].flags |= CCW_FLAG_CC;
4106                                 locate_record(ccw++, LO_data++,
4107                                               trkid, recoffs + 1,
4108                                               1, rcmd, basedev, count);
4109                         }
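                        /*
                         * Added illustrative note (not from the original
                         * source): CDL formats the first two tracks with
                         * records that may be shorter than the block size,
                         * so the command code is adjusted (rcmd |= 0x8) and,
                         * on reads, the missing tail of the block is padded
                         * with 0xe5 bytes.
                         */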
4110                         /* Locate record for standard blocks ? */
4111                         if (private->uses_cdl && recid == 2*blk_per_trk) {
4112                                 ccw[-1].flags |= CCW_FLAG_CC;
4113                                 locate_record(ccw++, LO_data++,
4114                                               trkid, recoffs + 1,
4115                                               last_rec - recid + 1,
4116                                               cmd, basedev, count);
4117                         }
4118                         /* Read/write ccw. */
4119                         ccw[-1].flags |= CCW_FLAG_CC;
4120                         ccw->cmd_code = rcmd;
4121                         ccw->count = count;
4122                         if (idal_is_needed(dst, blksize)) {
4123                                 ccw->cda = (__u32)virt_to_phys(idaws);
4124                                 ccw->flags = CCW_FLAG_IDA;
4125                                 idaws = idal_create_words(idaws, dst, blksize);
4126                         } else {
4127                                 ccw->cda = (__u32)virt_to_phys(dst);
4128                                 ccw->flags = 0;
4129                         }
4130                         ccw++;
4131                         dst += blksize;
4132                         recid++;
4133                 }
4134         }
4135         if (blk_noretry_request(req) ||
4136             block->base->features & DASD_FEATURE_FAILFAST)
4137                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4138         cqr->startdev = startdev;
4139         cqr->memdev = startdev;
4140         cqr->block = block;
4141         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4142         cqr->lpm = dasd_path_get_ppm(startdev);
4143         cqr->retries = startdev->default_retries;
4144         cqr->buildclk = get_tod_clock();
4145         cqr->status = DASD_CQR_FILLED;
4146
4147         /* Set flags to suppress output for expected errors */
4148         if (dasd_eckd_is_ese(basedev)) {
4149                 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4150                 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4151                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4152         }
4153
4154         return cqr;
4155 }
4156
4157 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
4158                                                struct dasd_device *startdev,
4159                                                struct dasd_block *block,
4160                                                struct request *req,
4161                                                sector_t first_rec,
4162                                                sector_t last_rec,
4163                                                sector_t first_trk,
4164                                                sector_t last_trk,
4165                                                unsigned int first_offs,
4166                                                unsigned int last_offs,
4167                                                unsigned int blk_per_trk,
4168                                                unsigned int blksize)
4169 {
4170         unsigned long *idaws;
4171         struct dasd_ccw_req *cqr;
4172         struct ccw1 *ccw;
4173         struct req_iterator iter;
4174         struct bio_vec bv;
4175         char *dst, *idaw_dst;
4176         unsigned int cidaw, cplength, datasize;
4177         unsigned int tlf;
4178         sector_t recid;
4179         unsigned char cmd;
4180         struct dasd_device *basedev;
4181         unsigned int trkcount, count, count_to_trk_end;
4182         unsigned int idaw_len, seg_len, part_len, len_to_track_end;
4183         unsigned char new_track, end_idaw;
4184         sector_t trkid;
4185         unsigned int recoffs;
4186
4187         basedev = block->base;
4188         if (rq_data_dir(req) == READ)
4189                 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4190         else if (rq_data_dir(req) == WRITE)
4191                 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4192         else
4193                 return ERR_PTR(-EINVAL);
4194
4195         /* Track based I/O needs IDAWs for each page, and not just for
4196          * 64 bit addresses. We need additional idals for pages
4197          * that get filled from two tracks, so we use the number
4198          * of records as an upper limit.
4199          */
4200         cidaw = last_rec - first_rec + 1;
4201         trkcount = last_trk - first_trk + 1;
4202
4203         /* 1x prefix + one read/write ccw per track */
4204         cplength = 1 + trkcount;
4205
4206         datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
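        /*
         * Added illustrative example (not from the original source): a
         * request covering records 40..75 on tracks 5..7 gives
         * trkcount = 3, cplength = 1 + 3 = 4 ccws and room for
         * cidaw = 75 - 40 + 1 = 36 idal words behind the prefix data.
         */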
4207
4208         /* Allocate the ccw request. */
4209         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4210                                    startdev, blk_mq_rq_to_pdu(req));
4211         if (IS_ERR(cqr))
4212                 return cqr;
4213         ccw = cqr->cpaddr;
4214         /* transfer length factor: how many bytes to read from the last track */
4215         if (first_trk == last_trk)
4216                 tlf = last_offs - first_offs + 1;
4217         else
4218                 tlf = last_offs + 1;
4219         tlf *= blksize;
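        /*
         * Added illustrative example (not from the original source): with
         * blksize = 4096, first_offs = 2 and last_offs = 5 on the same
         * track, tlf = (5 - 2 + 1) * 4096 = 16384 bytes are transferred
         * from the last (here: only) track.
         */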
4220
4221         if (prefix_LRE(ccw++, cqr->data, first_trk,
4222                        last_trk, cmd, basedev, startdev,
4223                        1 /* format */, first_offs + 1,
4224                        trkcount, blksize,
4225                        tlf) == -EAGAIN) {
4226                 /* Clock not in sync and XRC is enabled.
4227                  * Try again later.
4228                  */
4229                 dasd_sfree_request(cqr, startdev);
4230                 return ERR_PTR(-EAGAIN);
4231         }
4232
4233         /*
4234          * The translation of a request into ccw programs must meet the
4235          * following conditions:
4236          * - all idaws but the first and the last must address full pages
4237          *   (or 2K blocks on 31-bit)
4238          * - the scope of a ccw and its idal ends at the track boundaries
4239          */
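        /*
         * Added illustrative example (assumes IDA_BLOCK_SIZE == 4KiB): a
         * contiguous 12KiB area starting at physical address 0x1000 is
         * described by three idaws (0x1000, 0x2000, 0x3000); only the
         * first idaw of a ccw may start unaligned, which is why the loop
         * below rejects a new idaw_dst that is not block aligned.
         */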
4240         idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
4241         recid = first_rec;
4242         new_track = 1;
4243         end_idaw = 0;
4244         len_to_track_end = 0;
4245         idaw_dst = NULL;
4246         idaw_len = 0;
4247         rq_for_each_segment(bv, req, iter) {
4248                 dst = bvec_virt(&bv);
4249                 seg_len = bv.bv_len;
4250                 while (seg_len) {
4251                         if (new_track) {
4252                                 trkid = recid;
4253                                 recoffs = sector_div(trkid, blk_per_trk);
4254                                 count_to_trk_end = blk_per_trk - recoffs;
4255                                 count = min((last_rec - recid + 1),
4256                                             (sector_t)count_to_trk_end);
4257                                 len_to_track_end = count * blksize;
4258                                 ccw[-1].flags |= CCW_FLAG_CC;
4259                                 ccw->cmd_code = cmd;
4260                                 ccw->count = len_to_track_end;
4261                                 ccw->cda = (__u32)virt_to_phys(idaws);
4262                                 ccw->flags = CCW_FLAG_IDA;
4263                                 ccw++;
4264                                 recid += count;
4265                                 new_track = 0;
4266                                 /* first idaw for a ccw may start anywhere */
4267                                 if (!idaw_dst)
4268                                         idaw_dst = dst;
4269                         }
4270                         /* If we start a new idaw, we must make sure that it
4271                          * starts on an IDA_BLOCK_SIZE boundary.
4272                          * If we continue an idaw, we must make sure that the
4273                          * current segment begins where the idaw accumulated
4274                          * so far ends.
4275                          */
4276                         if (!idaw_dst) {
4277                                 if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
4278                                         dasd_sfree_request(cqr, startdev);
4279                                         return ERR_PTR(-ERANGE);
4280                                 } else
4281                                         idaw_dst = dst;
4282                         }
4283                         if ((idaw_dst + idaw_len) != dst) {
4284                                 dasd_sfree_request(cqr, startdev);
4285                                 return ERR_PTR(-ERANGE);
4286                         }
4287                         part_len = min(seg_len, len_to_track_end);
4288                         seg_len -= part_len;
4289                         dst += part_len;
4290                         idaw_len += part_len;
4291                         len_to_track_end -= part_len;
4292                         /* If the collected memory area ends on an IDA_BLOCK
4293                          * border, create an idaw.
4294                          * idal_create_words will handle cases where idaw_len
4295                          * is larger than IDA_BLOCK_SIZE.
4296                          */
4297                         if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
4298                                 end_idaw = 1;
4299                         /* We also need to end the idaw at track end */
4300                         if (!len_to_track_end) {
4301                                 new_track = 1;
4302                                 end_idaw = 1;
4303                         }
4304                         if (end_idaw) {
4305                                 idaws = idal_create_words(idaws, idaw_dst,
4306                                                           idaw_len);
4307                                 idaw_dst = NULL;
4308                                 idaw_len = 0;
4309                                 end_idaw = 0;
4310                         }
4311                 }
4312         }
4313
4314         if (blk_noretry_request(req) ||
4315             block->base->features & DASD_FEATURE_FAILFAST)
4316                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4317         cqr->startdev = startdev;
4318         cqr->memdev = startdev;
4319         cqr->block = block;
4320         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4321         cqr->lpm = dasd_path_get_ppm(startdev);
4322         cqr->retries = startdev->default_retries;
4323         cqr->buildclk = get_tod_clock();
4324         cqr->status = DASD_CQR_FILLED;
4325
4326         /* Set flags to suppress output for expected errors */
4327         if (dasd_eckd_is_ese(basedev))
4328                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4329
4330         return cqr;
4331 }
4332
4333 static int prepare_itcw(struct itcw *itcw,
4334                         unsigned int trk, unsigned int totrk, int cmd,
4335                         struct dasd_device *basedev,
4336                         struct dasd_device *startdev,
4337                         unsigned int rec_on_trk, int count,
4338                         unsigned int blksize,
4339                         unsigned int total_data_size,
4340                         unsigned int tlf,
4341                         unsigned int blk_per_trk)
4342 {
4343         struct PFX_eckd_data pfxdata;
4344         struct dasd_eckd_private *basepriv, *startpriv;
4345         struct DE_eckd_data *dedata;
4346         struct LRE_eckd_data *lredata;
4347         struct dcw *dcw;
4348
4349         u32 begcyl, endcyl;
4350         u16 heads, beghead, endhead;
4351         u8 pfx_cmd;
4352
4353         int rc = 0;
4354         int sector = 0;
4355         int dn, d;
4356
4357
4358         /* setup prefix data */
4359         basepriv = basedev->private;
4360         startpriv = startdev->private;
4361         dedata = &pfxdata.define_extent;
4362         lredata = &pfxdata.locate_record;
4363
4364         memset(&pfxdata, 0, sizeof(pfxdata));
4365         pfxdata.format = 1; /* PFX with LRE */
4366         pfxdata.base_address = basepriv->conf.ned->unit_addr;
4367         pfxdata.base_lss = basepriv->conf.ned->ID;
4368         pfxdata.validity.define_extent = 1;
4369
4370         /* private uid is kept up to date, conf_data may be outdated */
4371         if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4372                 pfxdata.validity.verify_base = 1;
4373
4374         if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4375                 pfxdata.validity.verify_base = 1;
4376                 pfxdata.validity.hyper_pav = 1;
4377         }
4378
4379         switch (cmd) {
4380         case DASD_ECKD_CCW_READ_TRACK_DATA:
4381                 dedata->mask.perm = 0x1;
4382                 dedata->attributes.operation = basepriv->attrib.operation;
4383                 dedata->blk_size = blksize;
4384                 dedata->ga_extended |= 0x42;
4385                 lredata->operation.orientation = 0x0;
4386                 lredata->operation.operation = 0x0C;
4387                 lredata->auxiliary.check_bytes = 0x01;
4388                 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4389                 break;
4390         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4391                 dedata->mask.perm = 0x02;
4392                 dedata->attributes.operation = basepriv->attrib.operation;
4393                 dedata->blk_size = blksize;
4394                 rc = set_timestamp(NULL, dedata, basedev);
4395                 dedata->ga_extended |= 0x42;
4396                 lredata->operation.orientation = 0x0;
4397                 lredata->operation.operation = 0x3F;
4398                 lredata->extended_operation = 0x23;
4399                 lredata->auxiliary.check_bytes = 0x2;
4400                 /*
4401                  * If XRC is supported the System Time Stamp is set. The
4402                  * validity of the time stamp must be reflected in the prefix
4403                  * data as well.
4404                  */
4405                 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4406                         pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
4407                 pfx_cmd = DASD_ECKD_CCW_PFX;
4408                 break;
4409         case DASD_ECKD_CCW_READ_COUNT_MT:
4410                 dedata->mask.perm = 0x1;
4411                 dedata->attributes.operation = DASD_BYPASS_CACHE;
4412                 dedata->ga_extended |= 0x42;
4413                 dedata->blk_size = blksize;
4414                 lredata->operation.orientation = 0x2;
4415                 lredata->operation.operation = 0x16;
4416                 lredata->auxiliary.check_bytes = 0x01;
4417                 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4418                 break;
4419         default:
4420                 DBF_DEV_EVENT(DBF_ERR, basedev,
4421                               "prepare itcw, unknown opcode 0x%x", cmd);
4422                 BUG();
4423                 break;
4424         }
4425         if (rc)
4426                 return rc;
4427
4428         dedata->attributes.mode = 0x3;  /* ECKD */
4429
4430         heads = basepriv->rdc_data.trk_per_cyl;
4431         begcyl = trk / heads;
4432         beghead = trk % heads;
4433         endcyl = totrk / heads;
4434         endhead = totrk % heads;
4435
4436         /* check for sequential prestage - enhance cylinder range */
4437         if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4438             dedata->attributes.operation == DASD_SEQ_ACCESS) {
4439
4440                 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4441                         endcyl += basepriv->attrib.nr_cyl;
4442                 else
4443                         endcyl = (basepriv->real_cyl - 1);
4444         }
4445
4446         set_ch_t(&dedata->beg_ext, begcyl, beghead);
4447         set_ch_t(&dedata->end_ext, endcyl, endhead);
4448
4449         dedata->ep_format = 0x20; /* records per track is valid */
4450         dedata->ep_rec_per_track = blk_per_trk;
4451
4452         if (rec_on_trk) {
4453                 switch (basepriv->rdc_data.dev_type) {
4454                 case 0x3390:
4455                         dn = ceil_quot(blksize + 6, 232);
4456                         d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4457                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4458                         break;
4459                 case 0x3380:
4460                         d = 7 + ceil_quot(blksize + 12, 32);
4461                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
4462                         break;
4463                 }
4464         }
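        /*
         * Added illustrative example (not from the original source): for a
         * 3390 with blksize = 4096 and rec_on_trk = 2 this gives
         * dn = ceil_quot(4102, 232) = 18,
         * d  = 9 + ceil_quot(4096 + 6 * 19, 34) = 9 + 124 = 133 and
         * sector = (49 + 1 * (10 + 133)) / 8 = 24.
         */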
4465
4466         if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4467                 lredata->auxiliary.length_valid = 0;
4468                 lredata->auxiliary.length_scope = 0;
4469                 lredata->sector = 0xff;
4470         } else {
4471                 lredata->auxiliary.length_valid = 1;
4472                 lredata->auxiliary.length_scope = 1;
4473                 lredata->sector = sector;
4474         }
4475         lredata->auxiliary.imbedded_ccw_valid = 1;
4476         lredata->length = tlf;
4477         lredata->imbedded_ccw = cmd;
4478         lredata->count = count;
4479         set_ch_t(&lredata->seek_addr, begcyl, beghead);
4480         lredata->search_arg.cyl = lredata->seek_addr.cyl;
4481         lredata->search_arg.head = lredata->seek_addr.head;
4482         lredata->search_arg.record = rec_on_trk;
4483
4484         dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4485                      &pfxdata, sizeof(pfxdata), total_data_size);
4486         return PTR_ERR_OR_ZERO(dcw);
4487 }
4488
4489 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4490                                                struct dasd_device *startdev,
4491                                                struct dasd_block *block,
4492                                                struct request *req,
4493                                                sector_t first_rec,
4494                                                sector_t last_rec,
4495                                                sector_t first_trk,
4496                                                sector_t last_trk,
4497                                                unsigned int first_offs,
4498                                                unsigned int last_offs,
4499                                                unsigned int blk_per_trk,
4500                                                unsigned int blksize)
4501 {
4502         struct dasd_ccw_req *cqr;
4503         struct req_iterator iter;
4504         struct bio_vec bv;
4505         char *dst;
4506         unsigned int trkcount, ctidaw;
4507         unsigned char cmd;
4508         struct dasd_device *basedev;
4509         unsigned int tlf;
4510         struct itcw *itcw;
4511         struct tidaw *last_tidaw = NULL;
4512         int itcw_op;
4513         size_t itcw_size;
4514         u8 tidaw_flags;
4515         unsigned int seg_len, part_len, len_to_track_end;
4516         unsigned char new_track;
4517         sector_t recid, trkid;
4518         unsigned int offs;
4519         unsigned int count, count_to_trk_end;
4520         int ret;
4521
4522         basedev = block->base;
4523         if (rq_data_dir(req) == READ) {
4524                 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4525                 itcw_op = ITCW_OP_READ;
4526         } else if (rq_data_dir(req) == WRITE) {
4527                 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4528                 itcw_op = ITCW_OP_WRITE;
4529         } else
4530                 return ERR_PTR(-EINVAL);
4531
4532         /* Track-based I/O needs to address all memory via TIDAWs,
4533          * not just 64 bit addresses. This allows us to map
4534          * each segment directly to one tidaw.
4535          * In the case of write requests, additional tidaws may
4536          * be needed when a segment crosses a track boundary.
4537          */
4538         trkcount = last_trk - first_trk + 1;
4539         ctidaw = 0;
4540         rq_for_each_segment(bv, req, iter) {
4541                 ++ctidaw;
4542         }
4543         if (rq_data_dir(req) == WRITE)
4544                 ctidaw += (last_trk - first_trk);
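        /*
         * Added illustrative example (not from the original source): a
         * write request with 8 bio segments that spans tracks 10..12
         * needs ctidaw = 8 + (12 - 10) = 10 tidaws, because a segment
         * that crosses a track boundary is split into two tidaws.
         */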
4545
4546         /* Allocate the ccw request. */
4547         itcw_size = itcw_calc_size(0, ctidaw, 0);
4548         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4549                                    blk_mq_rq_to_pdu(req));
4550         if (IS_ERR(cqr))
4551                 return cqr;
4552
4553         /* transfer length factor: how many bytes to read from the last track */
4554         if (first_trk == last_trk)
4555                 tlf = last_offs - first_offs + 1;
4556         else
4557                 tlf = last_offs + 1;
4558         tlf *= blksize;
4559
4560         itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4561         if (IS_ERR(itcw)) {
4562                 ret = -EINVAL;
4563                 goto out_error;
4564         }
4565         cqr->cpaddr = itcw_get_tcw(itcw);
4566         if (prepare_itcw(itcw, first_trk, last_trk,
4567                          cmd, basedev, startdev,
4568                          first_offs + 1,
4569                          trkcount, blksize,
4570                          (last_rec - first_rec + 1) * blksize,
4571                          tlf, blk_per_trk) == -EAGAIN) {
4572                 /* Clock not in sync and XRC is enabled.
4573                  * Try again later.
4574                  */
4575                 ret = -EAGAIN;
4576                 goto out_error;
4577         }
4578         len_to_track_end = 0;
4579         /*
4580          * A tidaw can address 4k of memory, but must not cross page boundaries.
4581          * We can let the block layer handle this by setting
4582          * blk_queue_segment_boundary to page boundaries and
4583          * blk_max_segment_size to page size when setting up the request queue.
4584          * For write requests, a TIDAW must not cross track boundaries, because
4585          * we have to set the CBC flag on the last tidaw for each track.
4586          */
4587         if (rq_data_dir(req) == WRITE) {
4588                 new_track = 1;
4589                 recid = first_rec;
4590                 rq_for_each_segment(bv, req, iter) {
4591                         dst = bvec_virt(&bv);
4592                         seg_len = bv.bv_len;
4593                         while (seg_len) {
4594                                 if (new_track) {
4595                                         trkid = recid;
4596                                         offs = sector_div(trkid, blk_per_trk);
4597                                         count_to_trk_end = blk_per_trk - offs;
4598                                         count = min((last_rec - recid + 1),
4599                                                     (sector_t)count_to_trk_end);
4600                                         len_to_track_end = count * blksize;
4601                                         recid += count;
4602                                         new_track = 0;
4603                                 }
4604                                 part_len = min(seg_len, len_to_track_end);
4605                                 seg_len -= part_len;
4606                                 len_to_track_end -= part_len;
4607                                 /* We need to end the tidaw at track end */
4608                                 if (!len_to_track_end) {
4609                                         new_track = 1;
4610                                         tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4611                                 } else
4612                                         tidaw_flags = 0;
4613                                 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4614                                                             dst, part_len);
4615                                 if (IS_ERR(last_tidaw)) {
4616                                         ret = -EINVAL;
4617                                         goto out_error;
4618                                 }
4619                                 dst += part_len;
4620                         }
4621                 }
4622         } else {
4623                 rq_for_each_segment(bv, req, iter) {
4624                         dst = bvec_virt(&bv);
4625                         last_tidaw = itcw_add_tidaw(itcw, 0x00,
4626                                                     dst, bv.bv_len);
4627                         if (IS_ERR(last_tidaw)) {
4628                                 ret = -EINVAL;
4629                                 goto out_error;
4630                         }
4631                 }
4632         }
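        /*
         * Added descriptive note (not from the original source): the last
         * tidaw terminates the list, so TIDAW_FLAGS_LAST is set and the
         * CBC insert flag, which must not remain set on the final tidaw,
         * is cleared again.
         */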
4633         last_tidaw->flags |= TIDAW_FLAGS_LAST;
4634         last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4635         itcw_finalize(itcw);
4636
4637         if (blk_noretry_request(req) ||
4638             block->base->features & DASD_FEATURE_FAILFAST)
4639                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4640         cqr->cpmode = 1;
4641         cqr->startdev = startdev;
4642         cqr->memdev = startdev;
4643         cqr->block = block;
4644         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4645         cqr->lpm = dasd_path_get_ppm(startdev);
4646         cqr->retries = startdev->default_retries;
4647         cqr->buildclk = get_tod_clock();
4648         cqr->status = DASD_CQR_FILLED;
4649
4650         /* Set flags to suppress output for expected errors */
4651         if (dasd_eckd_is_ese(basedev)) {
4652                 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4653                 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4654                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4655         }
4656
4657         return cqr;
4658 out_error:
4659         dasd_sfree_request(cqr, startdev);
4660         return ERR_PTR(ret);
4661 }
4662
4663 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4664                                                struct dasd_block *block,
4665                                                struct request *req)
4666 {
4667         int cmdrtd, cmdwtd;
4668         int use_prefix;
4669         int fcx_multitrack;
4670         struct dasd_eckd_private *private;
4671         struct dasd_device *basedev;
4672         sector_t first_rec, last_rec;
4673         sector_t first_trk, last_trk;
4674         unsigned int first_offs, last_offs;
4675         unsigned int blk_per_trk, blksize;
4676         int cdlspecial;
4677         unsigned int data_size;
4678         struct dasd_ccw_req *cqr;
4679
4680         basedev = block->base;
4681         private = basedev->private;
4682
4683         /* Calculate number of blocks/records per track. */
4684         blksize = block->bp_block;
4685         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4686         if (blk_per_trk == 0)
4687                 return ERR_PTR(-EINVAL);
4688         /* Calculate record id of first and last block. */
4689         first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4690         first_offs = sector_div(first_trk, blk_per_trk);
4691         last_rec = last_trk =
4692                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4693         last_offs = sector_div(last_trk, blk_per_trk);
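        /*
         * Added illustrative example (assumes 4KiB blocks, i.e.
         * s2b_shift = 3, and blk_per_trk = 12 on a 3390): a request
         * starting at sector 96 gives first_rec = 96 >> 3 = 12,
         * first_trk = 1 and first_offs = 0, i.e. the first block on the
         * second track.
         */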
4694         cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4695
4696         fcx_multitrack = private->features.feature[40] & 0x20;
4697         data_size = blk_rq_bytes(req);
4698         if (data_size % blksize)
4699                 return ERR_PTR(-EINVAL);
4700         /* tpm write requests add CBC data on each track boundary */
4701         if (rq_data_dir(req) == WRITE)
4702                 data_size += (last_trk - first_trk) * 4;
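        /*
         * Added illustrative example (not from the original source): a
         * 64KiB write (data_size = 65536) that spans tracks 10..12 is
         * accounted as 65536 + 2 * 4 = 65544 bytes, 4 bytes of CBC data
         * per crossed track boundary.
         */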
4703
4704         /* is read track data and write track data in command mode supported? */
4705         cmdrtd = private->features.feature[9] & 0x20;
4706         cmdwtd = private->features.feature[12] & 0x40;
4707         use_prefix = private->features.feature[8] & 0x01;
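        /*
         * Added descriptive note (not from the original source): the code
         * below picks the most capable channel program variant usable for
         * this request: transport mode (tpm) track I/O when the data fits
         * into fcx_max_data, command mode track I/O when prefix and
         * read/write track data are supported, and the single block
         * command mode program as the final fallback (also used for CDL
         * special blocks and when dasd_page_cache is active).
         */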
4708
4709         cqr = NULL;
4710         if (cdlspecial || dasd_page_cache) {
4711                 /* do nothing, just fall through to the cmd mode single case */
4712         } else if ((data_size <= private->fcx_max_data)
4713                    && (fcx_multitrack || (first_trk == last_trk))) {
4714                 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4715                                                     first_rec, last_rec,
4716                                                     first_trk, last_trk,
4717                                                     first_offs, last_offs,
4718                                                     blk_per_trk, blksize);
4719                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4720                     (PTR_ERR(cqr) != -ENOMEM))
4721                         cqr = NULL;
4722         } else if (use_prefix &&
4723                    (((rq_data_dir(req) == READ) && cmdrtd) ||
4724                     ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4725                 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4726                                                    first_rec, last_rec,
4727                                                    first_trk, last_trk,
4728                                                    first_offs, last_offs,
4729                                                    blk_per_trk, blksize);
4730                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4731                     (PTR_ERR(cqr) != -ENOMEM))
4732                         cqr = NULL;
4733         }
4734         if (!cqr)
4735                 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4736                                                     first_rec, last_rec,
4737                                                     first_trk, last_trk,
4738                                                     first_offs, last_offs,
4739                                                     blk_per_trk, blksize);
4740         return cqr;
4741 }
4742
4743 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4744                                                    struct dasd_block *block,
4745                                                    struct request *req)
4746 {
4747         sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4748         unsigned int seg_len, len_to_track_end;
4749         unsigned int cidaw, cplength, datasize;
4750         sector_t first_trk, last_trk, sectors;
4751         struct dasd_eckd_private *base_priv;
4752         struct dasd_device *basedev;
4753         struct req_iterator iter;
4754         struct dasd_ccw_req *cqr;
4755         unsigned int trkcount;
4756         unsigned long *idaws;
4757         unsigned int size;
4758         unsigned char cmd;
4759         struct bio_vec bv;
4760         struct ccw1 *ccw;
4761         int use_prefix;
4762         void *data;
4763         char *dst;
4764
4765         /*
4766          * raw track access needs to be a multiple of 64k and on a 64k boundary.
4767          * For read requests we can fix an incorrect alignment by padding
4768          * the request with dummy pages.
4769          */
4770         start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4771         end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4772                 DASD_RAW_SECTORS_PER_TRACK;
4773         end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4774                 DASD_RAW_SECTORS_PER_TRACK;
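        /*
         * Added illustrative example (not from the original source): with
         * 128 sectors per raw track, a read starting at sector 100 for
         * 300 sectors gives start_padding_sectors = 100,
         * end_sector_offset = 400 % 128 = 16 and
         * end_padding_sectors = (128 - 16) % 128 = 112; a write with such
         * padding would be rejected below.
         */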
4775         basedev = block->base;
4776         if ((start_padding_sectors || end_padding_sectors) &&
4777             (rq_data_dir(req) == WRITE)) {
4778                 DBF_DEV_EVENT(DBF_ERR, basedev,
4779                               "raw write not track aligned (%llu,%llu) req %p",
4780                               start_padding_sectors, end_padding_sectors, req);
4781                 return ERR_PTR(-EINVAL);
4782         }
4783
4784         first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4785         last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4786                 DASD_RAW_SECTORS_PER_TRACK;
4787         trkcount = last_trk - first_trk + 1;
4788
4789         if (rq_data_dir(req) == READ)
4790                 cmd = DASD_ECKD_CCW_READ_TRACK;
4791         else if (rq_data_dir(req) == WRITE)
4792                 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
4793         else
4794                 return ERR_PTR(-EINVAL);
4795
4796         /*
4797          * Raw track based I/O needs IDAWs for each page,
4798          * and not just for 64 bit addresses.
4799          */
4800         cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
4801
4802         /*
4803          * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4804          * of extended parameter. This is needed for write full track.
4805          */
4806         base_priv = basedev->private;
4807         use_prefix = base_priv->features.feature[8] & 0x01;
4808         if (use_prefix) {
4809                 cplength = 1 + trkcount;
4810                 size = sizeof(struct PFX_eckd_data) + 2;
4811         } else {
4812                 cplength = 2 + trkcount;
4813                 size = sizeof(struct DE_eckd_data) +
4814                         sizeof(struct LRE_eckd_data) + 2;
4815         }
4816         size = ALIGN(size, 8);
4817
4818         datasize = size + cidaw * sizeof(unsigned long);
4819
4820         /* Allocate the ccw request. */
4821         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4822                                    datasize, startdev, blk_mq_rq_to_pdu(req));
4823         if (IS_ERR(cqr))
4824                 return cqr;
4825
4826         ccw = cqr->cpaddr;
4827         data = cqr->data;
4828
4829         if (use_prefix) {
4830                 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4831                            startdev, 1, 0, trkcount, 0, 0);
4832         } else {
4833                 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4834                 ccw[-1].flags |= CCW_FLAG_CC;
4835
4836                 data += sizeof(struct DE_eckd_data);
4837                 locate_record_ext(ccw++, data, first_trk, 0,
4838                                   trkcount, cmd, basedev, 0, 0);
4839         }
4840
4841         idaws = (unsigned long *)(cqr->data + size);
4842         len_to_track_end = 0;
4843         if (start_padding_sectors) {
4844                 ccw[-1].flags |= CCW_FLAG_CC;
4845                 ccw->cmd_code = cmd;
4846                 /* maximum 3390 track size */
4847                 ccw->count = 57326;
4848                 /* 64k maps to one track */
4849                 len_to_track_end = 65536 - start_padding_sectors * 512;
4850                 ccw->cda = (__u32)virt_to_phys(idaws);
4851                 ccw->flags |= CCW_FLAG_IDA;
4852                 ccw->flags |= CCW_FLAG_SLI;
4853                 ccw++;
4854                 for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4855                         idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4856         }
4857         rq_for_each_segment(bv, req, iter) {
4858                 dst = bvec_virt(&bv);
4859                 seg_len = bv.bv_len;
4860                 if (cmd == DASD_ECKD_CCW_READ_TRACK)
4861                         memset(dst, 0, seg_len);
4862                 if (!len_to_track_end) {
4863                         ccw[-1].flags |= CCW_FLAG_CC;
4864                         ccw->cmd_code = cmd;
4865                         /* maximum 3390 track size */
4866                         ccw->count = 57326;
4867                         /* 64k maps to one track */
4868                         len_to_track_end = 65536;
4869                         ccw->cda = (__u32)virt_to_phys(idaws);
4870                         ccw->flags |= CCW_FLAG_IDA;
4871                         ccw->flags |= CCW_FLAG_SLI;
4872                         ccw++;
4873                 }
4874                 len_to_track_end -= seg_len;
4875                 idaws = idal_create_words(idaws, dst, seg_len);
4876         }
4877         for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4878                 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4879         if (blk_noretry_request(req) ||
4880             block->base->features & DASD_FEATURE_FAILFAST)
4881                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4882         cqr->startdev = startdev;
4883         cqr->memdev = startdev;
4884         cqr->block = block;
4885         cqr->expires = startdev->default_expires * HZ;
4886         cqr->lpm = dasd_path_get_ppm(startdev);
4887         cqr->retries = startdev->default_retries;
4888         cqr->buildclk = get_tod_clock();
4889         cqr->status = DASD_CQR_FILLED;
4890
4891         return cqr;
4892 }
4893
4894
4895 static int
4896 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4897 {
4898         struct dasd_eckd_private *private;
4899         struct ccw1 *ccw;
4900         struct req_iterator iter;
4901         struct bio_vec bv;
4902         char *dst, *cda;
4903         unsigned int blksize, blk_per_trk, off;
4904         sector_t recid;
4905         int status;
4906
4907         if (!dasd_page_cache)
4908                 goto out;
4909         private = cqr->block->base->private;
4910         blksize = cqr->block->bp_block;
4911         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4912         recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4913         ccw = cqr->cpaddr;
4914         /* Skip over define extent & locate record. */
4915         ccw++;
4916         if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4917                 ccw++;
4918         rq_for_each_segment(bv, req, iter) {
4919                 dst = bvec_virt(&bv);
4920                 for (off = 0; off < bv.bv_len; off += blksize) {
4921                         /* Skip locate record. */
4922                         if (private->uses_cdl && recid <= 2*blk_per_trk)
4923                                 ccw++;
4924                         if (dst) {
4925                                 if (ccw->flags & CCW_FLAG_IDA)
4926                                         cda = *((char **)phys_to_virt(ccw->cda));
4927                                 else
4928                                         cda = phys_to_virt(ccw->cda);
4929                                 if (dst != cda) {
4930                                         if (rq_data_dir(req) == READ)
4931                                                 memcpy(dst, cda, bv.bv_len);
4932                                         kmem_cache_free(dasd_page_cache,
4933                                             (void *)((addr_t)cda & PAGE_MASK));
4934                                 }
4935                                 dst = NULL;
4936                         }
4937                         ccw++;
4938                         recid++;
4939                 }
4940         }
4941 out:
4942         status = cqr->status == DASD_CQR_DONE;
4943         dasd_sfree_request(cqr, cqr->memdev);
4944         return status;
4945 }
4946
4947 /*
4948  * Modify ccw/tcw in cqr so it can be started on a base device.
4949  *
4950  * Note that this is not enough to restart the cqr!
4951  * Either reset cqr->startdev as well (summary unit check handling)
4952  * or restart via separate cqr (as in ERP handling).
4953  */
4954 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4955 {
4956         struct ccw1 *ccw;
4957         struct PFX_eckd_data *pfxdata;
4958         struct tcw *tcw;
4959         struct tccb *tccb;
4960         struct dcw *dcw;
4961
4962         if (cqr->cpmode == 1) {
4963                 tcw = cqr->cpaddr;
4964                 tccb = tcw_get_tccb(tcw);
4965                 dcw = (struct dcw *)&tccb->tca[0];
4966                 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4967                 pfxdata->validity.verify_base = 0;
4968                 pfxdata->validity.hyper_pav = 0;
4969         } else {
4970                 ccw = cqr->cpaddr;
4971                 pfxdata = cqr->data;
4972                 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4973                         pfxdata->validity.verify_base = 0;
4974                         pfxdata->validity.hyper_pav = 0;
4975                 }
4976         }
4977 }
4978
4979 #define DASD_ECKD_CHANQ_MAX_SIZE 4
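/*
 * Added descriptive note (not from the original source): the per start
 * device counter used in dasd_eckd_build_alias_cp() below limits the
 * number of concurrently built requests per (alias) device to
 * DASD_ECKD_CHANQ_MAX_SIZE; once the limit is reached -EBUSY is returned
 * so the caller can retry the request later.
 */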
4980
4981 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4982                                                      struct dasd_block *block,
4983                                                      struct request *req)
4984 {
4985         struct dasd_eckd_private *private;
4986         struct dasd_device *startdev;
4987         unsigned long flags;
4988         struct dasd_ccw_req *cqr;
4989
4990         startdev = dasd_alias_get_start_dev(base);
4991         if (!startdev)
4992                 startdev = base;
4993         private = startdev->private;
4994         if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4995                 return ERR_PTR(-EBUSY);
4996
4997         spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4998         private->count++;
4999         if ((base->features & DASD_FEATURE_USERAW))
5000                 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
5001         else
5002                 cqr = dasd_eckd_build_cp(startdev, block, req);
5003         if (IS_ERR(cqr))
5004                 private->count--;
5005         spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
5006         return cqr;
5007 }
5008
5009 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
5010                                    struct request *req)
5011 {
5012         struct dasd_eckd_private *private;
5013         unsigned long flags;
5014
5015         spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
5016         private = cqr->memdev->private;
5017         private->count--;
5018         spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
5019         return dasd_eckd_free_cp(cqr, req);
5020 }
5021
5022 static int
5023 dasd_eckd_fill_info(struct dasd_device * device,
5024                     struct dasd_information2_t * info)
5025 {
5026         struct dasd_eckd_private *private = device->private;
5027
5028         info->label_block = 2;
5029         info->FBA_layout = private->uses_cdl ? 0 : 1;
5030         info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
5031         info->characteristics_size = sizeof(private->rdc_data);
5032         memcpy(info->characteristics, &private->rdc_data,
5033                sizeof(private->rdc_data));
5034         info->confdata_size = min_t(unsigned long, private->conf.len,
5035                                     sizeof(info->configuration_data));
5036         memcpy(info->configuration_data, private->conf.data,
5037                info->confdata_size);
5038         return 0;
5039 }
5040
5041 /*
5042  * SECTION: ioctl functions for eckd devices.
5043  */
5044
5045 /*
5046  * Release device ioctl.
5047  * Builds a channel program to release a previously reserved
5048  * (see dasd_eckd_reserve) device.
5049  */
5050 static int
5051 dasd_eckd_release(struct dasd_device *device)
5052 {
5053         struct dasd_ccw_req *cqr;
5054         int rc;
5055         struct ccw1 *ccw;
5056         int useglobal;
5057
5058         if (!capable(CAP_SYS_ADMIN))
5059                 return -EACCES;
5060
5061         useglobal = 0;
5062         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5063         if (IS_ERR(cqr)) {
5064                 mutex_lock(&dasd_reserve_mutex);
5065                 useglobal = 1;
5066                 cqr = &dasd_reserve_req->cqr;
5067                 memset(cqr, 0, sizeof(*cqr));
5068                 memset(&dasd_reserve_req->ccw, 0,
5069                        sizeof(dasd_reserve_req->ccw));
5070                 cqr->cpaddr = &dasd_reserve_req->ccw;
5071                 cqr->data = &dasd_reserve_req->data;
5072                 cqr->magic = DASD_ECKD_MAGIC;
5073         }
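        /*
         * Added descriptive note (not from the original source): when the
         * normal request allocation fails, e.g. while I/O is stalled by an
         * outstanding reservation, the statically preallocated
         * dasd_reserve_req is used instead, serialized by
         * dasd_reserve_mutex; reserve, steal lock and snid below use the
         * same fallback.
         */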
5074         ccw = cqr->cpaddr;
5075         ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
5076         ccw->flags |= CCW_FLAG_SLI;
5077         ccw->count = 32;
5078         ccw->cda = (__u32)virt_to_phys(cqr->data);
5079         cqr->startdev = device;
5080         cqr->memdev = device;
5081         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5082         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5083         cqr->retries = 2;       /* set retry counter to enable basic ERP */
5084         cqr->expires = 2 * HZ;
5085         cqr->buildclk = get_tod_clock();
5086         cqr->status = DASD_CQR_FILLED;
5087
5088         rc = dasd_sleep_on_immediatly(cqr);
5089         if (!rc)
5090                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5091
5092         if (useglobal)
5093                 mutex_unlock(&dasd_reserve_mutex);
5094         else
5095                 dasd_sfree_request(cqr, cqr->memdev);
5096         return rc;
5097 }
5098
5099 /*
5100  * Reserve device ioctl.
5101  * Options are set to 'synchronous wait for interrupt' and
5102  * 'timeout the request'. This leads to a terminate IO if
5103  * the interrupt is outstanding for a certain time.
5104  */
5105 static int
5106 dasd_eckd_reserve(struct dasd_device *device)
5107 {
5108         struct dasd_ccw_req *cqr;
5109         int rc;
5110         struct ccw1 *ccw;
5111         int useglobal;
5112
5113         if (!capable(CAP_SYS_ADMIN))
5114                 return -EACCES;
5115
5116         useglobal = 0;
5117         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5118         if (IS_ERR(cqr)) {
5119                 mutex_lock(&dasd_reserve_mutex);
5120                 useglobal = 1;
5121                 cqr = &dasd_reserve_req->cqr;
5122                 memset(cqr, 0, sizeof(*cqr));
5123                 memset(&dasd_reserve_req->ccw, 0,
5124                        sizeof(dasd_reserve_req->ccw));
5125                 cqr->cpaddr = &dasd_reserve_req->ccw;
5126                 cqr->data = &dasd_reserve_req->data;
5127                 cqr->magic = DASD_ECKD_MAGIC;
5128         }
5129         ccw = cqr->cpaddr;
5130         ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
5131         ccw->flags |= CCW_FLAG_SLI;
5132         ccw->count = 32;
5133         ccw->cda = (__u32)virt_to_phys(cqr->data);
5134         cqr->startdev = device;
5135         cqr->memdev = device;
5136         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5137         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5138         cqr->retries = 2;       /* set retry counter to enable basic ERP */
5139         cqr->expires = 2 * HZ;
5140         cqr->buildclk = get_tod_clock();
5141         cqr->status = DASD_CQR_FILLED;
5142
5143         rc = dasd_sleep_on_immediatly(cqr);
5144         if (!rc)
5145                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5146
5147         if (useglobal)
5148                 mutex_unlock(&dasd_reserve_mutex);
5149         else
5150                 dasd_sfree_request(cqr, cqr->memdev);
5151         return rc;
5152 }
5153
5154 /*
5155  * Steal lock ioctl - unconditional reserve device.
5156  * Builds a channel program to break a device's reservation.
5157  * (unconditional reserve)
5158  */
5159 static int
5160 dasd_eckd_steal_lock(struct dasd_device *device)
5161 {
5162         struct dasd_ccw_req *cqr;
5163         int rc;
5164         struct ccw1 *ccw;
5165         int useglobal;
5166
5167         if (!capable(CAP_SYS_ADMIN))
5168                 return -EACCES;
5169
5170         useglobal = 0;
5171         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5172         if (IS_ERR(cqr)) {
5173                 mutex_lock(&dasd_reserve_mutex);
5174                 useglobal = 1;
5175                 cqr = &dasd_reserve_req->cqr;
5176                 memset(cqr, 0, sizeof(*cqr));
5177                 memset(&dasd_reserve_req->ccw, 0,
5178                        sizeof(dasd_reserve_req->ccw));
5179                 cqr->cpaddr = &dasd_reserve_req->ccw;
5180                 cqr->data = &dasd_reserve_req->data;
5181                 cqr->magic = DASD_ECKD_MAGIC;
5182         }
5183         ccw = cqr->cpaddr;
5184         ccw->cmd_code = DASD_ECKD_CCW_SLCK;
5185         ccw->flags |= CCW_FLAG_SLI;
5186         ccw->count = 32;
5187         ccw->cda = (__u32)virt_to_phys(cqr->data);
5188         cqr->startdev = device;
5189         cqr->memdev = device;
5190         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5191         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5192         cqr->retries = 2;       /* set retry counter to enable basic ERP */
5193         cqr->expires = 2 * HZ;
5194         cqr->buildclk = get_tod_clock();
5195         cqr->status = DASD_CQR_FILLED;
5196
5197         rc = dasd_sleep_on_immediatly(cqr);
5198         if (!rc)
5199                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5200
5201         if (useglobal)
5202                 mutex_unlock(&dasd_reserve_mutex);
5203         else
5204                 dasd_sfree_request(cqr, cqr->memdev);
5205         return rc;
5206 }
5207
5208 /*
5209  * SNID - Sense Path Group ID
5210  * This ioctl may be used in situations where I/O is stalled due to
5211  * a reserve, so if the normal dasd_smalloc_request fails, we use the
5212  * preallocated dasd_reserve_req.
5213  */
5214 static int dasd_eckd_snid(struct dasd_device *device,
5215                           void __user *argp)
5216 {
5217         struct dasd_ccw_req *cqr;
5218         int rc;
5219         struct ccw1 *ccw;
5220         int useglobal;
5221         struct dasd_snid_ioctl_data usrparm;
5222
5223         if (!capable(CAP_SYS_ADMIN))
5224                 return -EACCES;
5225
5226         if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5227                 return -EFAULT;
5228
5229         useglobal = 0;
5230         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
5231                                    sizeof(struct dasd_snid_data), device,
5232                                    NULL);
5233         if (IS_ERR(cqr)) {
5234                 mutex_lock(&dasd_reserve_mutex);
5235                 useglobal = 1;
5236                 cqr = &dasd_reserve_req->cqr;
5237                 memset(cqr, 0, sizeof(*cqr));
5238                 memset(&dasd_reserve_req->ccw, 0,
5239                        sizeof(dasd_reserve_req->ccw));
5240                 cqr->cpaddr = &dasd_reserve_req->ccw;
5241                 cqr->data = &dasd_reserve_req->data;
5242                 cqr->magic = DASD_ECKD_MAGIC;
5243         }
5244         ccw = cqr->cpaddr;
5245         ccw->cmd_code = DASD_ECKD_CCW_SNID;
5246         ccw->flags |= CCW_FLAG_SLI;
5247         ccw->count = 12;
5248         ccw->cda = (__u32)virt_to_phys(cqr->data);
5249         cqr->startdev = device;
5250         cqr->memdev = device;
5251         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5252         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5253         set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
5254         cqr->retries = 5;
5255         cqr->expires = 10 * HZ;
5256         cqr->buildclk = get_tod_clock();
5257         cqr->status = DASD_CQR_FILLED;
5258         cqr->lpm = usrparm.path_mask;
5259
5260         rc = dasd_sleep_on_immediatly(cqr);
5261         /* verify that I/O processing didn't modify the path mask */
5262         if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
5263                 rc = -EIO;
5264         if (!rc) {
5265                 usrparm.data = *((struct dasd_snid_data *)cqr->data);
5266                 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
5267                         rc = -EFAULT;
5268         }
5269
5270         if (useglobal)
5271                 mutex_unlock(&dasd_reserve_mutex);
5272         else
5273                 dasd_sfree_request(cqr, cqr->memdev);
5274         return rc;
5275 }
5276
5277 /*
5278  * Read performance statistics
5279  */
5280 static int
5281 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
5282 {
5283         struct dasd_psf_prssd_data *prssdp;
5284         struct dasd_rssd_perf_stats_t *stats;
5285         struct dasd_ccw_req *cqr;
5286         struct ccw1 *ccw;
5287         int rc;
5288
5289         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
5290                                    (sizeof(struct dasd_psf_prssd_data) +
5291                                     sizeof(struct dasd_rssd_perf_stats_t)),
5292                                    device, NULL);
5293         if (IS_ERR(cqr)) {
5294                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5295                             "Could not allocate initialization request");
5296                 return PTR_ERR(cqr);
5297         }
5298         cqr->startdev = device;
5299         cqr->memdev = device;
5300         cqr->retries = 0;
5301         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5302         cqr->expires = 10 * HZ;
5303
5304         /* Prepare for Read Subsystem Data */
5305         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5306         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5307         prssdp->order = PSF_ORDER_PRSSD;
5308         prssdp->suborder = 0x01;        /* Performance Statistics */
5309         prssdp->varies[1] = 0x01;       /* Perf Statistics for the Subsystem */
5310
5311         ccw = cqr->cpaddr;
5312         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5313         ccw->count = sizeof(struct dasd_psf_prssd_data);
5314         ccw->flags |= CCW_FLAG_CC;
5315         ccw->cda = (__u32)virt_to_phys(prssdp);
5316
5317         /* Read Subsystem Data - Performance Statistics */
5318         stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5319         memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
5320
5321         ccw++;
5322         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5323         ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
5324         ccw->cda = (__u32)virt_to_phys(stats);
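        /*
         * Added descriptive note (not from the original source): the two
         * chained ccws above form the usual PSF/RSSD pattern, a Perform
         * Subsystem Function ccw that orders the performance statistics,
         * followed by a Read Subsystem Data ccw that reads the result
         * into 'stats'.
         */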
5325
5326         cqr->buildclk = get_tod_clock();
5327         cqr->status = DASD_CQR_FILLED;
5328         rc = dasd_sleep_on(cqr);
5329         if (rc == 0) {
5330                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5331                 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5332                 if (copy_to_user(argp, stats,
5333                                  sizeof(struct dasd_rssd_perf_stats_t)))
5334                         rc = -EFAULT;
5335         }
5336         dasd_sfree_request(cqr, cqr->memdev);
5337         return rc;
5338 }
5339
5340 /*
5341  * Get attributes (cache operations)
5342  * Returns the cache attributes used in Define Extent (DE).
5343  */
5344 static int
5345 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5346 {
5347         struct dasd_eckd_private *private = device->private;
5348         struct attrib_data_t attrib = private->attrib;
5349         int rc;
5350
5351         if (!capable(CAP_SYS_ADMIN))
5352                 return -EACCES;
5353         if (!argp)
5354                 return -EINVAL;
5355
5356         rc = 0;
5357         if (copy_to_user(argp, (long *) &attrib,
5358                          sizeof(struct attrib_data_t)))
5359                 rc = -EFAULT;
5360
5361         return rc;
5362 }
5363
5364 /*
5365  * Set attributes (cache operations)
5366  * Stores the attributes for cache operations to be used in Define Extent (DE).
5367  */
5368 static int
5369 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5370 {
5371         struct dasd_eckd_private *private = device->private;
5372         struct attrib_data_t attrib;
5373
5374         if (!capable(CAP_SYS_ADMIN))
5375                 return -EACCES;
5376         if (!argp)
5377                 return -EINVAL;
5378
5379         if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5380                 return -EFAULT;
5381         private->attrib = attrib;
5382
5383         dev_info(&device->cdev->dev,
5384                  "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5385                  private->attrib.operation, private->attrib.nr_cyl);
5386         return 0;
5387 }
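/*
 * Illustrative sketch, not part of the driver: the two functions above are
 * driven by the BIODASDGATTR/BIODASDSATTR ioctls.  Assuming the
 * attrib_data_t layout and the cache-mode constants from <asm/dasd.h>
 * (e.g. DASD_SEQ_PRESTAGE), a user-space caller might switch the cache
 * mode like this:
 *
 *	struct attrib_data_t attrib = {
 *		.operation = DASD_SEQ_PRESTAGE,	// cache mode for Define Extent
 *		.nr_cyl    = 2,			// cylinders to prestage
 *	};
 *
 *	if (ioctl(fd, BIODASDSATTR, &attrib))
 *		perror("BIODASDSATTR");
 *
 * The values are kept in private->attrib and reused when the driver builds
 * the Define Extent for subsequent channel programs.
 */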
5388
5389 /*
5390  * Issue syscall I/O to EMC Symmetrix array.
5391  * CCWs are PSF and RSSD
5392  */
5393 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5394 {
5395         struct dasd_symmio_parms usrparm;
5396         char *psf_data, *rssd_result;
5397         struct dasd_ccw_req *cqr;
5398         struct ccw1 *ccw;
5399         char psf0, psf1;
5400         int rc;
5401
5402         if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5403                 return -EACCES;
5404         psf0 = psf1 = 0;
5405
5406         /* Copy parms from caller */
5407         rc = -EFAULT;
5408         if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5409                 goto out;
5410         if (is_compat_task()) {
5411                 /* Make sure pointers are sane even on 31 bit. */
5412                 rc = -EINVAL;
5413                 if ((usrparm.psf_data >> 32) != 0)
5414                         goto out;
5415                 if ((usrparm.rssd_result >> 32) != 0)
5416                         goto out;
5417                 usrparm.psf_data &= 0x7fffffffULL;
5418                 usrparm.rssd_result &= 0x7fffffffULL;
5419         }
5420         /* at least 2 bytes are accessed and should be allocated */
5421         if (usrparm.psf_data_len < 2) {
5422                 DBF_DEV_EVENT(DBF_WARNING, device,
5423                               "Symmetrix ioctl invalid data length %d",
5424                               usrparm.psf_data_len);
5425                 rc = -EINVAL;
5426                 goto out;
5427         }
5428         /* alloc I/O data area */
5429         psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
5430         rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
5431         if (!psf_data || !rssd_result) {
5432                 rc = -ENOMEM;
5433                 goto out_free;
5434         }
5435
5436         /* get syscall header from user space */
5437         rc = -EFAULT;
5438         if (copy_from_user(psf_data,
5439                            (void __user *)(unsigned long) usrparm.psf_data,
5440                            usrparm.psf_data_len))
5441                 goto out_free;
5442         psf0 = psf_data[0];
5443         psf1 = psf_data[1];
5444
5445         /* setup CCWs for PSF + RSSD */
5446         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5447         if (IS_ERR(cqr)) {
5448                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5449                         "Could not allocate initialization request");
5450                 rc = PTR_ERR(cqr);
5451                 goto out_free;
5452         }
5453
5454         cqr->startdev = device;
5455         cqr->memdev = device;
5456         cqr->retries = 3;
5457         cqr->expires = 10 * HZ;
5458         cqr->buildclk = get_tod_clock();
5459         cqr->status = DASD_CQR_FILLED;
5460
5461         /* Build the ccws */
5462         ccw = cqr->cpaddr;
5463
5464         /* PSF ccw */
5465         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5466         ccw->count = usrparm.psf_data_len;
5467         ccw->flags |= CCW_FLAG_CC;
5468         ccw->cda = (__u32)virt_to_phys(psf_data);
5469
5470         ccw++;
5471
5472         /* RSSD ccw  */
5473         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5474         ccw->count = usrparm.rssd_result_len;
5475                 ccw->flags = CCW_FLAG_SLI;
5476         ccw->cda = (__u32)virt_to_phys(rssd_result);
5477
5478         rc = dasd_sleep_on(cqr);
5479         if (rc)
5480                 goto out_sfree;
5481
5482         rc = -EFAULT;
5483         if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
5484                            rssd_result, usrparm.rssd_result_len))
5485                 goto out_sfree;
5486         rc = 0;
5487
5488 out_sfree:
5489         dasd_sfree_request(cqr, cqr->memdev);
5490 out_free:
5491         kfree(rssd_result);
5492         kfree(psf_data);
5493 out:
5494         DBF_DEV_EVENT(DBF_WARNING, device,
5495                       "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
5496                       (int) psf0, (int) psf1, rc);
5497         return rc;
5498 }
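/*
 * Illustrative sketch, not part of the driver: dasd_symm_io() is reached
 * through the BIODASDSYMMIO ioctl.  Assuming struct dasd_symmio_parms as
 * exported by <asm/dasd.h>, the buffer addresses are passed as 64-bit
 * integers (which is why the compat path above masks them to 31 bits),
 * roughly like this:
 *
 *	char psf[64];		// syscall header; psf[0]/psf[1] are logged above
 *	char result[4096];
 *	struct dasd_symmio_parms parms = {
 *		.psf_data        = (unsigned long)psf,
 *		.psf_data_len    = sizeof(psf),
 *		.rssd_result     = (unsigned long)result,
 *		.rssd_result_len = sizeof(result),
 *	};
 *
 *	rc = ioctl(fd, BIODASDSYMMIO, &parms);
 *
 * Buffer sizes and contents are examples only; the actual layout is defined
 * by the EMC Symmetrix syscall interface.
 */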
5499
5500 static int
5501 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5502 {
5503         struct dasd_device *device = block->base;
5504
5505         switch (cmd) {
5506         case BIODASDGATTR:
5507                 return dasd_eckd_get_attrib(device, argp);
5508         case BIODASDSATTR:
5509                 return dasd_eckd_set_attrib(device, argp);
5510         case BIODASDPSRD:
5511                 return dasd_eckd_performance(device, argp);
5512         case BIODASDRLSE:
5513                 return dasd_eckd_release(device);
5514         case BIODASDRSRV:
5515                 return dasd_eckd_reserve(device);
5516         case BIODASDSLCK:
5517                 return dasd_eckd_steal_lock(device);
5518         case BIODASDSNID:
5519                 return dasd_eckd_snid(device, argp);
5520         case BIODASDSYMMIO:
5521                 return dasd_symm_io(device, argp);
5522         default:
5523                 return -ENOTTY;
5524         }
5525 }
5526
5527 /*
5528  * Dump the range of CCWs into the 'page' buffer
5529  * and print it.
5530  */
5531 static void
5532 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5533 {
5534         int len, count;
5535         char *datap;
5536
5537         len = 0;
5538         while (from <= to) {
5539                 len += sprintf(page + len, PRINTK_HEADER
5540                                " CCW %p: %08X %08X DAT:",
5541                                from, ((int *) from)[0], ((int *) from)[1]);
5542
5543                 /* get pointer to data (consider IDALs) */
5544                 if (from->flags & CCW_FLAG_IDA)
5545                         datap = (char *)*((addr_t *)phys_to_virt(from->cda));
5546                 else
5547                         datap = phys_to_virt(from->cda);
5548
5549                 /* dump data (max 128 bytes) */
5550                 for (count = 0; count < from->count && count < 128; count++) {
5551                         if (count % 32 == 0)
5552                                 len += sprintf(page + len, "\n");
5553                         if (count % 8 == 0)
5554                                 len += sprintf(page + len, " ");
5555                         if (count % 4 == 0)
5556                                 len += sprintf(page + len, " ");
5557                         len += sprintf(page + len, "%02x", datap[count]);
5558                 }
5559                 len += sprintf(page + len, "\n");
5560                 from++;
5561         }
5562         if (len > 0)
5563                 printk(KERN_ERR "%s", page);
5564 }
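/*
 * The dump produced above consists of one header line per CCW followed by
 * up to 128 data bytes, 32 per line and grouped into words.  An example of
 * the shape (prefix comes from PRINTK_HEADER; addresses and data purely
 * illustrative):
 *
 *	... CCW 00000000deadbeef: 63400010 1a2b3c40 DAT:
 *	  04000000 00000000  00003905 00000000  ...
 */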
5565
5566 static void
5567 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5568                          char *reason)
5569 {
5570         u64 *sense;
5571         u64 *stat;
5572
5573         sense = (u64 *) dasd_get_sense(irb);
5574         stat = (u64 *) &irb->scsw;
5575         if (sense) {
5576                 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5577                               "%016llx %016llx %016llx %016llx",
5578                               reason, *stat, *((u32 *) (stat + 1)),
5579                               sense[0], sense[1], sense[2], sense[3]);
5580         } else {
5581                 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5582                               reason, *stat, *((u32 *) (stat + 1)),
5583                               "NO VALID SENSE");
5584         }
5585 }
5586
5587 /*
5588  * Print sense data and related channel program.
5589  * Only parts are printed because the printk buffer is limited to 1024 bytes.
5590  */
5591 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
5592                                  struct dasd_ccw_req *req, struct irb *irb)
5593 {
5594         char *page;
5595         struct ccw1 *first, *last, *fail, *from, *to;
5596         int len, sl, sct;
5597
5598         page = (char *) get_zeroed_page(GFP_ATOMIC);
5599         if (page == NULL) {
5600                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5601                               "No memory to dump sense data\n");
5602                 return;
5603         }
5604         /* dump the sense data */
5605         len = sprintf(page, PRINTK_HEADER
5606                       " I/O status report for device %s:\n",
5607                       dev_name(&device->cdev->dev));
5608         len += sprintf(page + len, PRINTK_HEADER
5609                        " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5610                        "CS:%02X RC:%d\n",
5611                        req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5612                        scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5613                        scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5614                        req ? req->intrc : 0);
5615         len += sprintf(page + len, PRINTK_HEADER
5616                        " device %s: Failing CCW: %p\n",
5617                        dev_name(&device->cdev->dev),
5618                        phys_to_virt(irb->scsw.cmd.cpa));
5619         if (irb->esw.esw0.erw.cons) {
5620                 for (sl = 0; sl < 4; sl++) {
5621                         len += sprintf(page + len, PRINTK_HEADER
5622                                        " Sense(hex) %2d-%2d:",
5623                                        (8 * sl), ((8 * sl) + 7));
5624
5625                         for (sct = 0; sct < 8; sct++) {
5626                                 len += sprintf(page + len, " %02x",
5627                                                irb->ecw[8 * sl + sct]);
5628                         }
5629                         len += sprintf(page + len, "\n");
5630                 }
5631
5632                 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
5633                         /* 24 Byte Sense Data */
5634                         sprintf(page + len, PRINTK_HEADER
5635                                 " 24 Byte: %x MSG %x, "
5636                                 "%s MSGb to SYSOP\n",
5637                                 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
5638                                 irb->ecw[1] & 0x10 ? "" : "no");
5639                 } else {
5640                         /* 32 Byte Sense Data */
5641                         sprintf(page + len, PRINTK_HEADER
5642                                 " 32 Byte: Format: %x "
5643                                 "Exception class %x\n",
5644                                 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
5645                 }
5646         } else {
5647                 sprintf(page + len, PRINTK_HEADER
5648                         " SORRY - NO VALID SENSE AVAILABLE\n");
5649         }
5650         printk(KERN_ERR "%s", page);
5651
5652         if (req) {
5653                 /* req == NULL for unsolicited interrupts */
5654                 /* dump the Channel Program (max 140 Bytes per line) */
5655                 /* Count CCW and print first CCWs (maximum 7) */
5656                 first = req->cpaddr;
5657                 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
5658                 to = min(first + 6, last);
5659                 printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
5660                 dasd_eckd_dump_ccw_range(first, to, page);
5661
5662                 /* print failing CCW area (maximum 4) */
5663                 /* scsw->cda is either valid or zero  */
5664                 from = ++to;
5665                 fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
5666                 if (from < fail - 2) {
5667                         from = fail - 2;     /* there is a gap - print header */
5668                         printk(KERN_ERR PRINTK_HEADER "......\n");
5669                 }
5670                 to = min(fail + 1, last);
5671                 dasd_eckd_dump_ccw_range(from, to, page + len);
5672
5673                 /* print last CCWs (maximum 2) */
5674                 len = 0;
5675                 from = max(from, ++to);
5676                 if (from < last - 1) {
5677                         from = last - 1;     /* there is a gap - print header */
5678                         printk(KERN_ERR PRINTK_HEADER "......\n");
5679                 }
5680                 dasd_eckd_dump_ccw_range(from, last, page + len);
5681         }
5682         free_page((unsigned long) page);
5683 }
5684
5685
5686 /*
5687  * Print sense data from a tcw.
5688  */
5689 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
5690                                  struct dasd_ccw_req *req, struct irb *irb)
5691 {
5692         char *page;
5693         int len, sl, sct, residual;
5694         struct tsb *tsb;
5695         u8 *sense, *rcq;
5696
5697         page = (char *) get_zeroed_page(GFP_ATOMIC);
5698         if (page == NULL) {
5699                 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
5700                             "No memory to dump sense data");
5701                 return;
5702         }
5703         /* dump the sense data */
5704         len = sprintf(page, PRINTK_HEADER
5705                       " I/O status report for device %s:\n",
5706                       dev_name(&device->cdev->dev));
5707         len += sprintf(page + len, PRINTK_HEADER
5708                        " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5709                        "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
5710                        req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5711                        scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5712                        scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5713                        irb->scsw.tm.fcxs,
5714                        (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
5715                        req ? req->intrc : 0);
5716         len += sprintf(page + len, PRINTK_HEADER
5717                        " device %s: Failing TCW: %p\n",
5718                        dev_name(&device->cdev->dev),
5719                        phys_to_virt(irb->scsw.tm.tcw));
5720
5721         tsb = NULL;
5722         sense = NULL;
5723         if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
5724                 tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
5725
5726         if (tsb) {
5727                 len += sprintf(page + len, PRINTK_HEADER
5728                                " tsb->length %d\n", tsb->length);
5729                 len += sprintf(page + len, PRINTK_HEADER
5730                                " tsb->flags %x\n", tsb->flags);
5731                 len += sprintf(page + len, PRINTK_HEADER
5732                                " tsb->dcw_offset %d\n", tsb->dcw_offset);
5733                 len += sprintf(page + len, PRINTK_HEADER
5734                                " tsb->count %d\n", tsb->count);
5735                 residual = tsb->count - 28;
5736                 len += sprintf(page + len, PRINTK_HEADER
5737                                " residual %d\n", residual);
5738
5739                 switch (tsb->flags & 0x07) {
5740                 case 1: /* tsa_iostat */
5741                         len += sprintf(page + len, PRINTK_HEADER
5742                                " tsb->tsa.iostat.dev_time %d\n",
5743                                        tsb->tsa.iostat.dev_time);
5744                         len += sprintf(page + len, PRINTK_HEADER
5745                                " tsb->tsa.iostat.def_time %d\n",
5746                                        tsb->tsa.iostat.def_time);
5747                         len += sprintf(page + len, PRINTK_HEADER
5748                                " tsb->tsa.iostat.queue_time %d\n",
5749                                        tsb->tsa.iostat.queue_time);
5750                         len += sprintf(page + len, PRINTK_HEADER
5751                                " tsb->tsa.iostat.dev_busy_time %d\n",
5752                                        tsb->tsa.iostat.dev_busy_time);
5753                         len += sprintf(page + len, PRINTK_HEADER
5754                                " tsb->tsa.iostat.dev_act_time %d\n",
5755                                        tsb->tsa.iostat.dev_act_time);
5756                         sense = tsb->tsa.iostat.sense;
5757                         break;
5758                 case 2: /* ts_ddpc */
5759                         len += sprintf(page + len, PRINTK_HEADER
5760                                " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
5761                         for (sl = 0; sl < 2; sl++) {
5762                                 len += sprintf(page + len, PRINTK_HEADER
5763                                                " tsb->tsa.ddpc.rcq %2d-%2d: ",
5764                                                (8 * sl), ((8 * sl) + 7));
5765                                 rcq = tsb->tsa.ddpc.rcq;
5766                                 for (sct = 0; sct < 8; sct++) {
5767                                         len += sprintf(page + len, " %02x",
5768                                                        rcq[8 * sl + sct]);
5769                                 }
5770                                 len += sprintf(page + len, "\n");
5771                         }
5772                         sense = tsb->tsa.ddpc.sense;
5773                         break;
5774                 case 3: /* tsa_intrg */
5775                         len += sprintf(page + len, PRINTK_HEADER
5776                                       " tsb->tsa.intrg.: not supported yet\n");
5777                         break;
5778                 }
5779
5780                 if (sense) {
5781                         for (sl = 0; sl < 4; sl++) {
5782                                 len += sprintf(page + len, PRINTK_HEADER
5783                                                " Sense(hex) %2d-%2d:",
5784                                                (8 * sl), ((8 * sl) + 7));
5785                                 for (sct = 0; sct < 8; sct++) {
5786                                         len += sprintf(page + len, " %02x",
5787                                                        sense[8 * sl + sct]);
5788                                 }
5789                                 len += sprintf(page + len, "\n");
5790                         }
5791
5792                         if (sense[27] & DASD_SENSE_BIT_0) {
5793                                 /* 24 Byte Sense Data */
5794                                 sprintf(page + len, PRINTK_HEADER
5795                                         " 24 Byte: %x MSG %x, "
5796                                         "%s MSGb to SYSOP\n",
5797                                         sense[7] >> 4, sense[7] & 0x0f,
5798                                         sense[1] & 0x10 ? "" : "no");
5799                         } else {
5800                                 /* 32 Byte Sense Data */
5801                                 sprintf(page + len, PRINTK_HEADER
5802                                         " 32 Byte: Format: %x "
5803                                         "Exception class %x\n",
5804                                         sense[6] & 0x0f, sense[22] >> 4);
5805                         }
5806                 } else {
5807                         sprintf(page + len, PRINTK_HEADER
5808                                 " SORRY - NO VALID SENSE AVAILABLE\n");
5809                 }
5810         } else {
5811                 sprintf(page + len, PRINTK_HEADER
5812                         " SORRY - NO TSB DATA AVAILABLE\n");
5813         }
5814         printk(KERN_ERR "%s", page);
5815         free_page((unsigned long) page);
5816 }
5817
5818 static void dasd_eckd_dump_sense(struct dasd_device *device,
5819                                  struct dasd_ccw_req *req, struct irb *irb)
5820 {
5821         u8 *sense = dasd_get_sense(irb);
5822
5823         if (scsw_is_tm(&irb->scsw)) {
5824                 /*
5825                  * In some cases the 'File Protected' or 'Incorrect Length'
5826                  * error might be expected and log messages shouldn't be written
5827                  * then. Check if the corresponding suppress bit is set.
5828                  */
5829                 if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5830                     test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5831                         return;
5832                 if (scsw_cstat(&irb->scsw) == 0x40 &&
5833                     test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5834                         return;
5835
5836                 dasd_eckd_dump_sense_tcw(device, req, irb);
5837         } else {
5838                 /*
5839                  * In some cases the 'Command Reject' or 'No Record Found'
5840                  * error might be expected and log messages shouldn't be
5841                  * written then. Check if the corresponding suppress bit is set.
5842                  */
5843                 if (sense && sense[0] & SNS0_CMD_REJECT &&
5844                     test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5845                         return;
5846
5847                 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5848                     test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5849                         return;
5850
5851                 dasd_eckd_dump_sense_ccw(device, req, irb);
5852         }
5853 }
5854
5855 static int dasd_eckd_reload_device(struct dasd_device *device)
5856 {
5857         struct dasd_eckd_private *private = device->private;
5858         char print_uid[DASD_UID_STRLEN];
5859         int rc, old_base;
5860         struct dasd_uid uid;
5861         unsigned long flags;
5862
5863         /*
5864          * remove device from alias handling to prevent new requests
5865          * from being scheduled on the wrong alias device
5866          */
5867         dasd_alias_remove_device(device);
5868
5869         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5870         old_base = private->uid.base_unit_addr;
5871         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5872
5873         /* Read Configuration Data */
5874         rc = dasd_eckd_read_conf(device);
5875         if (rc)
5876                 goto out_err;
5877
5878         dasd_eckd_read_fc_security(device);
5879
5880         rc = dasd_eckd_generate_uid(device);
5881         if (rc)
5882                 goto out_err;
5883         /*
5884          * update unit address configuration and
5885          * add device to alias management
5886          */
5887         dasd_alias_update_add_device(device);
5888
5889         dasd_eckd_get_uid(device, &uid);
5890
5891         if (old_base != uid.base_unit_addr) {
5892                 dasd_eckd_get_uid_string(&private->conf, print_uid);
5893                 dev_info(&device->cdev->dev,
5894                          "An Alias device was reassigned to a new base device "
5895                          "with UID: %s\n", print_uid);
5896         }
5897         return 0;
5898
5899 out_err:
5900         return -1;
5901 }
5902
5903 static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5904                                          struct dasd_rssd_messages *messages,
5905                                          __u8 lpum)
5906 {
5907         struct dasd_rssd_messages *message_buf;
5908         struct dasd_psf_prssd_data *prssdp;
5909         struct dasd_ccw_req *cqr;
5910         struct ccw1 *ccw;
5911         int rc;
5912
5913         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5914                                    (sizeof(struct dasd_psf_prssd_data) +
5915                                     sizeof(struct dasd_rssd_messages)),
5916                                    device, NULL);
5917         if (IS_ERR(cqr)) {
5918                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5919                                 "Could not allocate read message buffer request");
5920                 return PTR_ERR(cqr);
5921         }
5922
5923         cqr->lpm = lpum;
5924 retry:
5925         cqr->startdev = device;
5926         cqr->memdev = device;
5927         cqr->block = NULL;
5928         cqr->expires = 10 * HZ;
5929         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5930         /* dasd_sleep_on_immediatly does not do complex error
5931          * recovery, so clear the ERP flag and set the retry counter
5932          * to do basic ERP */
5933         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5934         cqr->retries = 256;
5935
5936         /* Prepare for Read Subsystem Data */
5937         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5938         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5939         prssdp->order = PSF_ORDER_PRSSD;
5940         prssdp->suborder = 0x03;        /* Message Buffer */
5941         /* all other bytes of prssdp must be zero */
5942
5943         ccw = cqr->cpaddr;
5944         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5945         ccw->count = sizeof(struct dasd_psf_prssd_data);
5946         ccw->flags |= CCW_FLAG_CC;
5947         ccw->flags |= CCW_FLAG_SLI;
5948         ccw->cda = (__u32)virt_to_phys(prssdp);
5949
5950         /* Read Subsystem Data - message buffer */
5951         message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5952         memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5953
5954         ccw++;
5955         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5956         ccw->count = sizeof(struct dasd_rssd_messages);
5957         ccw->flags |= CCW_FLAG_SLI;
5958         ccw->cda = (__u32)virt_to_phys(message_buf);
5959
5960         cqr->buildclk = get_tod_clock();
5961         cqr->status = DASD_CQR_FILLED;
5962         rc = dasd_sleep_on_immediatly(cqr);
5963         if (rc == 0) {
5964                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5965                 message_buf = (struct dasd_rssd_messages *)
5966                         (prssdp + 1);
5967                 memcpy(messages, message_buf,
5968                        sizeof(struct dasd_rssd_messages));
5969         } else if (cqr->lpm) {
5970                 /*
5971                  * on z/VM we might not be able to do I/O on the requested path,
5972                  * but we can get the required information on any path,
5973                  * so retry with an open path mask
5974                  */
5975                 cqr->lpm = 0;
5976                 goto retry;
5977         } else
5978                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5979                                 "Reading messages failed with rc=%d\n"
5980                                 , rc);
5981         dasd_sfree_request(cqr, cqr->memdev);
5982         return rc;
5983 }
5984
5985 static int dasd_eckd_query_host_access(struct dasd_device *device,
5986                                        struct dasd_psf_query_host_access *data)
5987 {
5988         struct dasd_eckd_private *private = device->private;
5989         struct dasd_psf_query_host_access *host_access;
5990         struct dasd_psf_prssd_data *prssdp;
5991         struct dasd_ccw_req *cqr;
5992         struct ccw1 *ccw;
5993         int rc;
5994
5995         /* not available for HYPER PAV alias devices */
5996         if (!device->block && private->lcu->pav == HYPER_PAV)
5997                 return -EOPNOTSUPP;
5998
5999         /* may not be supported by the storage server */
6000         if (!(private->features.feature[14] & 0x80))
6001                 return -EOPNOTSUPP;
6002
6003         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
6004                                    sizeof(struct dasd_psf_prssd_data) + 1,
6005                                    device, NULL);
6006         if (IS_ERR(cqr)) {
6007                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6008                                 "Could not allocate query host access request");
6009                 return PTR_ERR(cqr);
6010         }
6011         host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
6012         if (!host_access) {
6013                 dasd_sfree_request(cqr, device);
6014                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6015                                 "Could not allocate host_access buffer");
6016                 return -ENOMEM;
6017         }
6018         cqr->startdev = device;
6019         cqr->memdev = device;
6020         cqr->block = NULL;
6021         cqr->retries = 256;
6022         cqr->expires = 10 * HZ;
6023
6024         /* Prepare for Read Subsystem Data */
6025         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
6026         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
6027         prssdp->order = PSF_ORDER_PRSSD;
6028         prssdp->suborder = PSF_SUBORDER_QHA;    /* query host access */
6029         /* LSS and Volume that will be queried */
6030         prssdp->lss = private->conf.ned->ID;
6031         prssdp->volume = private->conf.ned->unit_addr;
6032         /* all other bytes of prssdp must be zero */
6033
6034         ccw = cqr->cpaddr;
6035         ccw->cmd_code = DASD_ECKD_CCW_PSF;
6036         ccw->count = sizeof(struct dasd_psf_prssd_data);
6037         ccw->flags |= CCW_FLAG_CC;
6038         ccw->flags |= CCW_FLAG_SLI;
6039         ccw->cda = (__u32)virt_to_phys(prssdp);
6040
6041         /* Read Subsystem Data - query host access */
6042         ccw++;
6043         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
6044         ccw->count = sizeof(struct dasd_psf_query_host_access);
6045         ccw->flags |= CCW_FLAG_SLI;
6046         ccw->cda = (__u32)virt_to_phys(host_access);
6047
6048         cqr->buildclk = get_tod_clock();
6049         cqr->status = DASD_CQR_FILLED;
6050         /* the command might not be supported, suppress error message */
6051         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
6052         rc = dasd_sleep_on_interruptible(cqr);
6053         if (rc == 0) {
6054                 *data = *host_access;
6055         } else {
6056                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6057                                 "Reading host access data failed with rc=%d\n",
6058                                 rc);
6059                 rc = -EOPNOTSUPP;
6060         }
6061
6062         dasd_sfree_request(cqr, cqr->memdev);
6063         kfree(host_access);
6064         return rc;
6065 }
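/*
 * Note on the data returned above: the RSSD output is a struct
 * dasd_psf_query_host_access whose host_access_information area is parsed
 * by the callers below as a struct dasd_ckd_host_information header
 * followed by entry_count entries of entry_size bytes, each a
 * struct dasd_ckd_path_group_entry.
 */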
6066 /*
6067  * return number of grouped devices
6068  */
6069 static int dasd_eckd_host_access_count(struct dasd_device *device)
6070 {
6071         struct dasd_psf_query_host_access *access;
6072         struct dasd_ckd_path_group_entry *entry;
6073         struct dasd_ckd_host_information *info;
6074         int count = 0;
6075         int rc, i;
6076
6077         access = kzalloc(sizeof(*access), GFP_NOIO);
6078         if (!access) {
6079                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6080                                 "Could not allocate access buffer");
6081                 return -ENOMEM;
6082         }
6083         rc = dasd_eckd_query_host_access(device, access);
6084         if (rc) {
6085                 kfree(access);
6086                 return rc;
6087         }
6088
6089         info = (struct dasd_ckd_host_information *)
6090                 access->host_access_information;
6091         for (i = 0; i < info->entry_count; i++) {
6092                 entry = (struct dasd_ckd_path_group_entry *)
6093                         (info->entry + i * info->entry_size);
6094                 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6095                         count++;
6096         }
6097
6098         kfree(access);
6099         return count;
6100 }
6101
6102 /*
6103  * write host access information to a sequential file
6104  */
6105 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6106 {
6107         struct dasd_psf_query_host_access *access;
6108         struct dasd_ckd_path_group_entry *entry;
6109         struct dasd_ckd_host_information *info;
6110         char sysplex[9] = "";
6111         int rc, i;
6112
6113         access = kzalloc(sizeof(*access), GFP_NOIO);
6114         if (!access) {
6115                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6116                                 "Could not allocate access buffer");
6117                 return -ENOMEM;
6118         }
6119         rc = dasd_eckd_query_host_access(device, access);
6120         if (rc) {
6121                 kfree(access);
6122                 return rc;
6123         }
6124
6125         info = (struct dasd_ckd_host_information *)
6126                 access->host_access_information;
6127         for (i = 0; i < info->entry_count; i++) {
6128                 entry = (struct dasd_ckd_path_group_entry *)
6129                         (info->entry + i * info->entry_size);
6130                 /* PGID */
6131                 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6132                 /* FLAGS */
6133                 seq_printf(m, "status_flags %02x\n", entry->status_flags);
6134                 /* SYSPLEX NAME */
6135                 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6136                 EBCASC(sysplex, sizeof(sysplex));
6137                 seq_printf(m, "sysplex_name %8s\n", sysplex);
6138                 /* SUPPORTED CYLINDER */
6139                 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6140                 /* TIMESTAMP */
6141                 seq_printf(m, "timestamp %lu\n", (unsigned long)
6142                            entry->timestamp);
6143         }
6144         kfree(access);
6145
6146         return 0;
6147 }
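/*
 * Example of the sequential-file output produced above, one block per path
 * group entry (all values purely illustrative):
 *
 *	pgid 80d1e2f300a1b2c3d4e5f6
 *	status_flags 0a
 *	sysplex_name PLEX0001
 *	supported_cylinder 1182006
 *	timestamp 1700000000
 */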
6148
6149 static struct dasd_device
6150 *copy_relation_find_device(struct dasd_copy_relation *copy,
6151                            char *busid)
6152 {
6153         int i;
6154
6155         for (i = 0; i < DASD_CP_ENTRIES; i++) {
6156                 if (copy->entry[i].configured &&
6157                     strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
6158                         return copy->entry[i].device;
6159         }
6160         return NULL;
6161 }
6162
6163 /*
6164  * set the new active/primary device
6165  */
6166 static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
6167                                  char *old_busid)
6168 {
6169         int i;
6170
6171         for (i = 0; i < DASD_CP_ENTRIES; i++) {
6172                 if (copy->entry[i].configured &&
6173                     strncmp(copy->entry[i].busid, new_busid,
6174                             DASD_BUS_ID_SIZE) == 0) {
6175                         copy->active = &copy->entry[i];
6176                         copy->entry[i].primary = true;
6177                 } else if (copy->entry[i].configured &&
6178                            strncmp(copy->entry[i].busid, old_busid,
6179                                    DASD_BUS_ID_SIZE) == 0) {
6180                         copy->entry[i].primary = false;
6181                 }
6182         }
6183 }
6184
6185 /*
6186  * Swap the roles of a given copy pair.
6187  * During the swap operation the relation of the block device is disconnected
6188  * from the old primary and connected to the new one.
6189  *
6190  * I/O is paused on the block queue before the swap and may be resumed afterwards.
6191  */
6192 static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
6193                                     char *sec_busid)
6194 {
6195         struct dasd_device *primary, *secondary;
6196         struct dasd_copy_relation *copy;
6197         struct dasd_block *block;
6198         struct gendisk *gdp;
6199
6200         copy = device->copy;
6201         if (!copy)
6202                 return DASD_COPYPAIRSWAP_INVALID;
6203         primary = copy->active->device;
6204         if (!primary)
6205                 return DASD_COPYPAIRSWAP_INVALID;
6206         /* double check if swap has correct primary */
6207         if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
6208                 return DASD_COPYPAIRSWAP_PRIMARY;
6209
6210         secondary = copy_relation_find_device(copy, sec_busid);
6211         if (!secondary)
6212                 return DASD_COPYPAIRSWAP_SECONDARY;
6213
6214         /*
6215          * usually the device should already be quiesced for the swap;
6216          * for paranoia, stop the devices and requeue all requests again
6217          */
6218         dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
6219         dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
6220         dasd_generic_requeue_all_requests(primary);
6221
6222         /* swap DASD internal device <> block assignment */
6223         block = primary->block;
6224         primary->block = NULL;
6225         secondary->block = block;
6226         block->base = secondary;
6227         /* set new primary device in COPY relation */
6228         copy_pair_set_active(copy, sec_busid, prim_busid);
6229
6230         /* swap blocklayer device link */
6231         gdp = block->gdp;
6232         dasd_add_link_to_gendisk(gdp, secondary);
6233
6234         /* re-enable device */
6235         dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
6236         dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
6237         dasd_schedule_device_bh(secondary);
6238
6239         return DASD_COPYPAIRSWAP_SUCCESS;
6240 }
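/*
 * In short, the swap above proceeds as follows:
 *   1. stop both devices (DASD_STOPPED_PPRC) and requeue the primary's
 *      outstanding requests,
 *   2. move the dasd_block from the old primary to the secondary and point
 *      block->base at the new device,
 *   3. mark the new active/primary entry in the copy relation,
 *   4. relink the gendisk to the new base device,
 *   5. remove the stop bits again and schedule the new base device's
 *      bottom half.
 */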
6241
6242 /*
6243  * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
6244  */
6245 static int dasd_eckd_query_pprc_status(struct dasd_device *device,
6246                                        struct dasd_pprc_data_sc4 *data)
6247 {
6248         struct dasd_pprc_data_sc4 *pprc_data;
6249         struct dasd_psf_prssd_data *prssdp;
6250         struct dasd_ccw_req *cqr;
6251         struct ccw1 *ccw;
6252         int rc;
6253
6254         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
6255                                    sizeof(*prssdp) + sizeof(*pprc_data) + 1,
6256                                    device, NULL);
6257         if (IS_ERR(cqr)) {
6258                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6259                                 "Could not allocate query PPRC status request");
6260                 return PTR_ERR(cqr);
6261         }
6262         cqr->startdev = device;
6263         cqr->memdev = device;
6264         cqr->block = NULL;
6265         cqr->retries = 256;
6266         cqr->expires = 10 * HZ;
6267
6268         /* Prepare for Read Subsystem Data */
6269         prssdp = (struct dasd_psf_prssd_data *)cqr->data;
6270         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
6271         prssdp->order = PSF_ORDER_PRSSD;
6272         prssdp->suborder = PSF_SUBORDER_PPRCEQ;
6273         prssdp->varies[0] = PPRCEQ_SCOPE_4;
6274         pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
6275
6276         ccw = cqr->cpaddr;
6277         ccw->cmd_code = DASD_ECKD_CCW_PSF;
6278         ccw->count = sizeof(struct dasd_psf_prssd_data);
6279         ccw->flags |= CCW_FLAG_CC;
6280         ccw->flags |= CCW_FLAG_SLI;
6281         ccw->cda = (__u32)virt_to_phys(prssdp);
6282
6283         /* Read Subsystem Data - PPRC extended query */
6284         ccw++;
6285         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
6286         ccw->count = sizeof(*pprc_data);
6287         ccw->flags |= CCW_FLAG_SLI;
6288         ccw->cda = (__u32)virt_to_phys(pprc_data);
6289
6290         cqr->buildclk = get_tod_clock();
6291         cqr->status = DASD_CQR_FILLED;
6292
6293         rc = dasd_sleep_on_interruptible(cqr);
6294         if (rc == 0) {
6295                 *data = *pprc_data;
6296         } else {
6297                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6298                                 "PPRC Extended Query failed with rc=%d\n",
6299                                 rc);
6300                 rc = -EOPNOTSUPP;
6301         }
6302
6303         dasd_sfree_request(cqr, cqr->memdev);
6304         return rc;
6305 }
6306
6307 /*
6308  * ECKD NOP - no operation
6309  */
6310 static int dasd_eckd_nop(struct dasd_device *device)
6311 {
6312         struct dasd_ccw_req *cqr;
6313         struct ccw1 *ccw;
6314         int rc;
6315
6316         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
6317         if (IS_ERR(cqr)) {
6318                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6319                                 "Could not allocate NOP request");
6320                 return PTR_ERR(cqr);
6321         }
6322         cqr->startdev = device;
6323         cqr->memdev = device;
6324         cqr->block = NULL;
6325         cqr->retries = 1;
6326         cqr->expires = 10 * HZ;
6327
6328         ccw = cqr->cpaddr;
6329         ccw->cmd_code = DASD_ECKD_CCW_NOP;
6330         ccw->flags |= CCW_FLAG_SLI;
6331
6332         cqr->buildclk = get_tod_clock();
6333         cqr->status = DASD_CQR_FILLED;
6334
6335         rc = dasd_sleep_on_interruptible(cqr);
6336         if (rc != 0) {
6337                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6338                                 "NOP failed with rc=%d\n", rc);
6339                 rc = -EOPNOTSUPP;
6340         }
6341         dasd_sfree_request(cqr, cqr->memdev);
6342         return rc;
6343 }
6344
6345 static int dasd_eckd_device_ping(struct dasd_device *device)
6346 {
6347         return dasd_eckd_nop(device);
6348 }
6349
6350 /*
6351  * Perform Subsystem Function - CUIR response
6352  */
6353 static int
6354 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
6355                             __u32 message_id, __u8 lpum)
6356 {
6357         struct dasd_psf_cuir_response *psf_cuir;
6358         int pos = pathmask_to_pos(lpum);
6359         struct dasd_ccw_req *cqr;
6360         struct ccw1 *ccw;
6361         int rc;
6362
6363         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
6364                                    sizeof(struct dasd_psf_cuir_response),
6365                                    device, NULL);
6366
6367         if (IS_ERR(cqr)) {
6368                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6369                            "Could not allocate PSF-CUIR request");
6370                 return PTR_ERR(cqr);
6371         }
6372
6373         psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6374         psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
6375         psf_cuir->cc = response;
6376         psf_cuir->chpid = device->path[pos].chpid;
6377         psf_cuir->message_id = message_id;
6378         psf_cuir->cssid = device->path[pos].cssid;
6379         psf_cuir->ssid = device->path[pos].ssid;
6380         ccw = cqr->cpaddr;
6381         ccw->cmd_code = DASD_ECKD_CCW_PSF;
6382         ccw->cda = (__u32)virt_to_phys(psf_cuir);
6383         ccw->flags = CCW_FLAG_SLI;
6384         ccw->count = sizeof(struct dasd_psf_cuir_response);
6385
6386         cqr->startdev = device;
6387         cqr->memdev = device;
6388         cqr->block = NULL;
6389         cqr->retries = 256;
6390         cqr->expires = 10 * HZ;
6391         cqr->buildclk = get_tod_clock();
6392         cqr->status = DASD_CQR_FILLED;
6393         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6394
6395         rc = dasd_sleep_on(cqr);
6396
6397         dasd_sfree_request(cqr, cqr->memdev);
6398         return rc;
6399 }
6400
6401 /*
6402  * Return the configuration data referenced by the record selector
6403  * if one is specified; otherwise return the conf_data pointer for
6404  * the path specified by lpum
6405  */
6406 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6407                                                      __u8 lpum,
6408                                                      struct dasd_cuir_message *cuir)
6409 {
6410         struct dasd_conf_data *conf_data;
6411         int path, pos;
6412
6413         if (cuir->record_selector == 0)
6414                 goto out;
6415         for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6416                 conf_data = device->path[pos].conf_data;
6417                 if (conf_data->gneq.record_selector ==
6418                     cuir->record_selector)
6419                         return conf_data;
6420         }
6421 out:
6422         return device->path[pathmask_to_pos(lpum)].conf_data;
6423 }
6424
6425 /*
6426  * This function determines the scope of a reconfiguration request by
6427  * analysing the path and device selection data provided in the CUIR request.
6428  * Returns a path mask containing the CUIR-affected paths for the given device.
6429  *
6430  * If the CUIR request does not contain the required information, return the
6431  * path mask of the path the attention message for the CUIR request was received
6432  * on.
6433  */
6434 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6435                                 struct dasd_cuir_message *cuir)
6436 {
6437         struct dasd_conf_data *ref_conf_data;
6438         unsigned long bitmask = 0, mask = 0;
6439         struct dasd_conf_data *conf_data;
6440         unsigned int pos, path;
6441         char *ref_gneq, *gneq;
6442         char *ref_ned, *ned;
6443         int tbcpm = 0;
6444
6445         /* if the CUIR request does not specify the scope, use the path
6446            the attention message was presented on */
6447         if (!cuir->ned_map ||
6448             !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6449                 return lpum;
6450
6451         /* get reference conf data */
6452         ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6453         /* reference ned is determined by ned_map field */
6454         pos = 8 - ffs(cuir->ned_map);
6455         ref_ned = (char *)&ref_conf_data->neds[pos];
6456         ref_gneq = (char *)&ref_conf_data->gneq;
6457         /* transfer 24 bit neq_map to mask */
6458         mask = cuir->neq_map[2];
6459         mask |= cuir->neq_map[1] << 8;
6460         mask |= cuir->neq_map[0] << 16;
6461
6462         for (path = 0; path < 8; path++) {
6463                 /* initialise data per path */
6464                 bitmask = mask;
6465                 conf_data = device->path[path].conf_data;
6466                 pos = 8 - ffs(cuir->ned_map);
6467                 ned = (char *) &conf_data->neds[pos];
6468                 /* compare reference ned and per path ned */
6469                 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6470                         continue;
6471                 gneq = (char *)&conf_data->gneq;
6472                 /* compare reference gneq and per_path gneq under
6473                    24 bit mask where mask bit 0 equals byte 7 of
6474                    the gneq and mask bit 24 equals byte 31 */
6475                 while (bitmask) {
6476                         pos = ffs(bitmask) - 1;
6477                         if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6478                             != 0)
6479                                 break;
6480                         clear_bit(pos, &bitmask);
6481                 }
6482                 if (bitmask)
6483                         continue;
6484                 /* device and path match the reference values
6485                    add path to CUIR scope */
6486                 tbcpm |= 0x80 >> path;
6487         }
6488         return tbcpm;
6489 }
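/*
 * Worked example for the neq_map handling above (values illustrative):
 * a request with neq_map = { 0x00, 0x01, 0x80 } yields
 * mask = 0x80 | (0x01 << 8) | (0x00 << 16) = 0x0180, i.e. bits 7 and 8 set.
 * The inner loop then compares ref_gneq[31 - 7] (byte 24) and
 * ref_gneq[31 - 8] (byte 23) against the per-path gneq; only paths that
 * match on all selected bytes (and on the reference ned) are added to the
 * returned CUIR scope.
 */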
6490
6491 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6492                                        unsigned long paths, int action)
6493 {
6494         int pos;
6495
6496         while (paths) {
6497                 /* get position of bit in mask */
6498                 pos = 8 - ffs(paths);
6499                 /* get channel path descriptor from this position */
6500                 if (action == CUIR_QUIESCE)
6501                         pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
6502                                 device->path[pos].cssid,
6503                                 device->path[pos].chpid);
6504                 else if (action == CUIR_RESUME)
6505                         pr_info("Path %x.%02x is back online after service on the storage server\n",
6506                                 device->path[pos].cssid,
6507                                 device->path[pos].chpid);
6508                 clear_bit(7 - pos, &paths);
6509         }
6510 }
6511
6512 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6513                                       struct dasd_cuir_message *cuir)
6514 {
6515         unsigned long tbcpm;
6516
6517         tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6518         /* nothing to do if path is not in use */
6519         if (!(dasd_path_get_opm(device) & tbcpm))
6520                 return 0;
6521         if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6522                 /* no path would be left if the CUIR action is taken
6523                    return error */
6524                 return -EINVAL;
6525         }
6526         /* remove device from operational path mask */
6527         dasd_path_remove_opm(device, tbcpm);
6528         dasd_path_add_cuirpm(device, tbcpm);
6529         return tbcpm;
6530 }
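/*
 * Return value summary for dasd_eckd_cuir_remove_path(): 0 if none of the
 * affected paths is currently in use, -EINVAL if taking the CUIR action
 * would remove the last operational path, otherwise the path mask that was
 * moved from the operational path mask to the CUIR path mask.
 */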
6531
6532 /*
6533  * Walk through all devices and build a path mask to quiesce them.
6534  * Return an error if the last path to a device would be removed.
6535  *
6536  * If only part of the devices are quiesced when an error occurs,
6537  * no onlining is necessary; the storage server will notify the
6538  * devices that were already set offline again.
6539  */
6540 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6541                                   struct dasd_cuir_message *cuir)
6542 {
6543         struct dasd_eckd_private *private = device->private;
6544         struct alias_pav_group *pavgroup, *tempgroup;
6545         struct dasd_device *dev, *n;
6546         unsigned long paths = 0;
6547         unsigned long flags;
6548         int tbcpm;
6549
6550         /* active devices */
6551         list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6552                                  alias_list) {
6553                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6554                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6555                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6556                 if (tbcpm < 0)
6557                         goto out_err;
6558                 paths |= tbcpm;
6559         }
6560         /* inactive devices */
6561         list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6562                                  alias_list) {
6563                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6564                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6565                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6566                 if (tbcpm < 0)
6567                         goto out_err;
6568                 paths |= tbcpm;
6569         }
6570         /* devices in PAV groups */
6571         list_for_each_entry_safe(pavgroup, tempgroup,
6572                                  &private->lcu->grouplist, group) {
6573                 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6574                                          alias_list) {
6575                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6576                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6577                         spin_unlock_irqrestore(
6578                                 get_ccwdev_lock(dev->cdev), flags);
6579                         if (tbcpm < 0)
6580                                 goto out_err;
6581                         paths |= tbcpm;
6582                 }
6583                 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6584                                          alias_list) {
6585                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6586                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6587                         spin_unlock_irqrestore(
6588                                 get_ccwdev_lock(dev->cdev), flags);
6589                         if (tbcpm < 0)
6590                                 goto out_err;
6591                         paths |= tbcpm;
6592                 }
6593         }
6594         /* notify user about all paths affected by CUIR action */
6595         dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6596         return 0;
6597 out_err:
6598         return tbcpm;
6599 }
6600
6601 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6602                                  struct dasd_cuir_message *cuir)
6603 {
6604         struct dasd_eckd_private *private = device->private;
6605         struct alias_pav_group *pavgroup, *tempgroup;
6606         struct dasd_device *dev, *n;
6607         unsigned long paths = 0;
6608         int tbcpm;
6609
6610         /*
6611          * the path may have been added through a generic path event before;
6612          * only trigger path verification if the path is not already in use
6613          */
6614         list_for_each_entry_safe(dev, n,
6615                                  &private->lcu->active_devices,
6616                                  alias_list) {
6617                 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6618                 paths |= tbcpm;
6619                 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6620                         dasd_path_add_tbvpm(dev, tbcpm);
6621                         dasd_schedule_device_bh(dev);
6622                 }
6623         }
6624         list_for_each_entry_safe(dev, n,
6625                                  &private->lcu->inactive_devices,
6626                                  alias_list) {
6627                 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6628                 paths |= tbcpm;
6629                 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6630                         dasd_path_add_tbvpm(dev, tbcpm);
6631                         dasd_schedule_device_bh(dev);
6632                 }
6633         }
6634         /* devices in PAV groups */
6635         list_for_each_entry_safe(pavgroup, tempgroup,
6636                                  &private->lcu->grouplist,
6637                                  group) {
6638                 list_for_each_entry_safe(dev, n,
6639                                          &pavgroup->baselist,
6640                                          alias_list) {
6641                         tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6642                         paths |= tbcpm;
6643                         if (!(dasd_path_get_opm(dev) & tbcpm)) {
6644                                 dasd_path_add_tbvpm(dev, tbcpm);
6645                                 dasd_schedule_device_bh(dev);
6646                         }
6647                 }
6648                 list_for_each_entry_safe(dev, n,
6649                                          &pavgroup->aliaslist,
6650                                          alias_list) {
6651                         tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6652                         paths |= tbcpm;
6653                         if (!(dasd_path_get_opm(dev) & tbcpm)) {
6654                                 dasd_path_add_tbvpm(dev, tbcpm);
6655                                 dasd_schedule_device_bh(dev);
6656                         }
6657                 }
6658         }
6659         /* notify user about all paths affected by CUIR action */
6660         dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6661         return 0;
6662 }
6663
6664 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6665                                  __u8 lpum)
6666 {
6667         struct dasd_cuir_message *cuir = messages;
6668         int response;
6669
6670         DBF_DEV_EVENT(DBF_WARNING, device,
6671                       "CUIR request: %016llx %016llx %016llx %08x",
6672                       ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6673                       ((u32 *)cuir)[3]);
6674
6675         if (cuir->code == CUIR_QUIESCE) {
6676                 /* quiesce */
6677                 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6678                         response = PSF_CUIR_LAST_PATH;
6679                 else
6680                         response = PSF_CUIR_COMPLETED;
6681         } else if (cuir->code == CUIR_RESUME) {
6682                 /* resume */
6683                 dasd_eckd_cuir_resume(device, lpum, cuir);
6684                 response = PSF_CUIR_COMPLETED;
6685         } else
6686                 response = PSF_CUIR_NOT_SUPPORTED;
6687
6688         dasd_eckd_psf_cuir_response(device, response,
6689                                     cuir->message_id, lpum);
6690         DBF_DEV_EVENT(DBF_WARNING, device,
6691                       "CUIR response: %d on message ID %08x", response,
6692                       cuir->message_id);
6693         /* to make sure there is no attention left, schedule the work again */
6694         device->discipline->check_attention(device, lpum);
6695 }
6696
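/*
 * Resume all devices of the LCU that were stopped because the extent
 * pool ran out of space.
 */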
6697 static void dasd_eckd_oos_resume(struct dasd_device *device)
6698 {
6699         struct dasd_eckd_private *private = device->private;
6700         struct alias_pav_group *pavgroup, *tempgroup;
6701         struct dasd_device *dev, *n;
6702         unsigned long flags;
6703
6704         spin_lock_irqsave(&private->lcu->lock, flags);
6705         list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6706                                  alias_list) {
6707                 if (dev->stopped & DASD_STOPPED_NOSPC)
6708                         dasd_generic_space_avail(dev);
6709         }
6710         list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6711                                  alias_list) {
6712                 if (dev->stopped & DASD_STOPPED_NOSPC)
6713                         dasd_generic_space_avail(dev);
6714         }
6715         /* devices in PAV groups */
6716         list_for_each_entry_safe(pavgroup, tempgroup,
6717                                  &private->lcu->grouplist,
6718                                  group) {
6719                 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6720                                          alias_list) {
6721                         if (dev->stopped & DASD_STOPPED_NOSPC)
6722                                 dasd_generic_space_avail(dev);
6723                 }
6724                 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6725                                          alias_list) {
6726                         if (dev->stopped & DASD_STOPPED_NOSPC)
6727                                 dasd_generic_space_avail(dev);
6728                 }
6729         }
6730         spin_unlock_irqrestore(&private->lcu->lock, flags);
6731 }
6732
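/*
 * Process an extent pool attention message (warning, exhaustion, or
 * relief of the space constraint) and refresh the cached extent pool
 * information.
 */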
6733 static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
6734                                  __u8 lpum)
6735 {
6736         struct dasd_oos_message *oos = messages;
6737
6738         switch (oos->code) {
6739         case REPO_WARN:
6740         case POOL_WARN:
6741                 dev_warn(&device->cdev->dev,
6742                          "Extent pool usage has reached a critical value\n");
6743                 dasd_eckd_oos_resume(device);
6744                 break;
6745         case REPO_EXHAUST:
6746         case POOL_EXHAUST:
6747                 dev_warn(&device->cdev->dev,
6748                          "Extent pool is exhausted\n");
6749                 break;
6750         case REPO_RELIEVE:
6751         case POOL_RELIEVE:
6752                 dev_info(&device->cdev->dev,
6753                          "Extent pool physical space constraint has been relieved\n");
6754                 break;
6755         }
6756
6757         /* In any case, update related data */
6758         dasd_eckd_read_ext_pool_info(device);
6759
6760         /* to make sure there is no attention left, schedule the work again */
6761         device->discipline->check_attention(device, lpum);
6762 }
6763
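/*
 * Worker function: read the attention message buffer of the device and
 * dispatch CUIR and out-of-space messages.
 */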
6764 static void dasd_eckd_check_attention_work(struct work_struct *work)
6765 {
6766         struct check_attention_work_data *data;
6767         struct dasd_rssd_messages *messages;
6768         struct dasd_device *device;
6769         int rc;
6770
6771         data = container_of(work, struct check_attention_work_data, worker);
6772         device = data->device;
6773         messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6774         if (!messages) {
6775                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6776                               "Could not allocate attention message buffer");
6777                 goto out;
6778         }
6779         rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6780         if (rc)
6781                 goto out;
6782
6783         if (messages->length == ATTENTION_LENGTH_CUIR &&
6784             messages->format == ATTENTION_FORMAT_CUIR)
6785                 dasd_eckd_handle_cuir(device, messages, data->lpum);
6786         if (messages->length == ATTENTION_LENGTH_OOS &&
6787             messages->format == ATTENTION_FORMAT_OOS)
6788                 dasd_eckd_handle_oos(device, messages, data->lpum);
6789
6790 out:
6791         dasd_put_device(device);
6792         kfree(messages);
6793         kfree(data);
6794 }
6795
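/*
 * Schedule a worker to read and process pending attention messages for
 * the given path; uses GFP_ATOMIC so it can be called from atomic
 * context.
 */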
6796 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6797 {
6798         struct check_attention_work_data *data;
6799
6800         data = kzalloc(sizeof(*data), GFP_ATOMIC);
6801         if (!data)
6802                 return -ENOMEM;
6803         INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6804         dasd_get_device(device);
6805         data->device = device;
6806         data->lpum = lpum;
6807         schedule_work(&data->worker);
6808         return 0;
6809 }
6810
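/*
 * Disable HPF on a single channel path unless it is the last remaining
 * operational path. Returns 1 if the path was taken out of the
 * operational path mask, 0 otherwise.
 */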
6811 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6812 {
6813         if (~lpum & dasd_path_get_opm(device)) {
6814                 dasd_path_add_nohpfpm(device, lpum);
6815                 dasd_path_remove_opm(device, lpum);
6816                 dev_err(&device->cdev->dev,
6817                         "Channel path %02X lost HPF functionality and is disabled\n",
6818                         lpum);
6819                 return 1;
6820         }
6821         return 0;
6822 }
6823
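/* Disable High Performance FICON for the whole device. */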
6824 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6825 {
6826         struct dasd_eckd_private *private = device->private;
6827
6828         dev_err(&device->cdev->dev,
6829                 "High Performance FICON disabled\n");
6830         private->fcx_max_data = 0;
6831 }
6832
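/* Report whether High Performance FICON is currently enabled. */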
6833 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6834 {
6835         struct dasd_eckd_private *private = device->private;
6836
6837         return private->fcx_max_data ? 1 : 0;
6838 }
6839
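/*
 * React to an HPF error: depending on whether a single path or the
 * whole device lost HPF support, disable HPF on the path or on the
 * device, then stop new I/O and requeue the outstanding requests.
 */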
6840 static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6841                                        struct irb *irb)
6842 {
6843         struct dasd_eckd_private *private = device->private;
6844
6845         if (!private->fcx_max_data) {
6846                 /* sanity check: the device has no HPF, so the error makes no sense */
6847                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6848                               "Trying to disable HPF for a non HPF device");
6849                 return;
6850         }
6851         if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6852                 dasd_eckd_disable_hpf_device(device);
6853         } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6854                 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6855                         return;
6856                 dasd_eckd_disable_hpf_device(device);
6857                 dasd_path_set_tbvpm(device,
6858                                   dasd_path_get_hpfpm(device));
6859         }
6860         /*
6861          * Prevent any new I/O from being started on the device and schedule a
6862          * requeue of the existing requests.
6863          */
6864         dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6865         dasd_schedule_requeue(device);
6866 }
6867
6868 /*
6869  * Initialize block layer request queue.
6870  */
6871 static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6872 {
6873         unsigned int logical_block_size = block->bp_block;
6874         struct request_queue *q = block->gdp->queue;
6875         struct dasd_device *device = block->base;
6876         int max;
6877
6878         if (device->features & DASD_FEATURE_USERAW) {
6879                 /*
6880                  * The max_blocks value for raw_track access is 256; it is
6881                  * higher than the native ECKD value because only one CCW is
6882                  * needed per track.
6883                  * The resulting max_hw_sectors are
6884                  * 2048 x 512B = 1024kB = 16 tracks.
6885                  */
6886                 max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6887         } else {
6888                 max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6889         }
6890         blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
6891         q->limits.max_dev_sectors = max;
6892         blk_queue_logical_block_size(q, logical_block_size);
6893         blk_queue_max_hw_sectors(q, max);
6894         blk_queue_max_segments(q, USHRT_MAX);
6895         /* With page-sized segments, each segment can be translated into one idaw/tidaw */
6896         blk_queue_max_segment_size(q, PAGE_SIZE);
6897         blk_queue_segment_boundary(q, PAGE_SIZE - 1);
6898         blk_queue_dma_alignment(q, PAGE_SIZE - 1);
6899 }
6900
6901 static struct ccw_driver dasd_eckd_driver = {
6902         .driver = {
6903                 .name   = "dasd-eckd",
6904                 .owner  = THIS_MODULE,
6905                 .dev_groups = dasd_dev_groups,
6906         },
6907         .ids         = dasd_eckd_ids,
6908         .probe       = dasd_eckd_probe,
6909         .remove      = dasd_generic_remove,
6910         .set_offline = dasd_generic_set_offline,
6911         .set_online  = dasd_eckd_set_online,
6912         .notify      = dasd_generic_notify,
6913         .path_event  = dasd_generic_path_event,
6914         .shutdown    = dasd_generic_shutdown,
6915         .uc_handler  = dasd_generic_uc_handler,
6916         .int_class   = IRQIO_DAS,
6917 };
6918
6919 static struct dasd_discipline dasd_eckd_discipline = {
6920         .owner = THIS_MODULE,
6921         .name = "ECKD",
6922         .ebcname = "ECKD",
6923         .check_device = dasd_eckd_check_characteristics,
6924         .uncheck_device = dasd_eckd_uncheck_device,
6925         .do_analysis = dasd_eckd_do_analysis,
6926         .pe_handler = dasd_eckd_pe_handler,
6927         .basic_to_ready = dasd_eckd_basic_to_ready,
6928         .online_to_ready = dasd_eckd_online_to_ready,
6929         .basic_to_known = dasd_eckd_basic_to_known,
6930         .setup_blk_queue = dasd_eckd_setup_blk_queue,
6931         .fill_geometry = dasd_eckd_fill_geometry,
6932         .start_IO = dasd_start_IO,
6933         .term_IO = dasd_term_IO,
6934         .handle_terminated_request = dasd_eckd_handle_terminated_request,
6935         .format_device = dasd_eckd_format_device,
6936         .check_device_format = dasd_eckd_check_device_format,
6937         .erp_action = dasd_eckd_erp_action,
6938         .erp_postaction = dasd_eckd_erp_postaction,
6939         .check_for_device_change = dasd_eckd_check_for_device_change,
6940         .build_cp = dasd_eckd_build_alias_cp,
6941         .free_cp = dasd_eckd_free_alias_cp,
6942         .dump_sense = dasd_eckd_dump_sense,
6943         .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6944         .fill_info = dasd_eckd_fill_info,
6945         .ioctl = dasd_eckd_ioctl,
6946         .reload = dasd_eckd_reload_device,
6947         .get_uid = dasd_eckd_get_uid,
6948         .kick_validate = dasd_eckd_kick_validate_server,
6949         .check_attention = dasd_eckd_check_attention,
6950         .host_access_count = dasd_eckd_host_access_count,
6951         .hosts_print = dasd_hosts_print,
6952         .handle_hpf_error = dasd_eckd_handle_hpf_error,
6953         .disable_hpf = dasd_eckd_disable_hpf_device,
6954         .hpf_enabled = dasd_eckd_hpf_enabled,
6955         .reset_path = dasd_eckd_reset_path,
6956         .is_ese = dasd_eckd_is_ese,
6957         .space_allocated = dasd_eckd_space_allocated,
6958         .space_configured = dasd_eckd_space_configured,
6959         .logical_capacity = dasd_eckd_logical_capacity,
6960         .release_space = dasd_eckd_release_space,
6961         .ext_pool_id = dasd_eckd_ext_pool_id,
6962         .ext_size = dasd_eckd_ext_size,
6963         .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6964         .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6965         .ext_pool_oos = dasd_eckd_ext_pool_oos,
6966         .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
6967         .ese_format = dasd_eckd_ese_format,
6968         .ese_read = dasd_eckd_ese_read,
6969         .pprc_status = dasd_eckd_query_pprc_status,
6970         .pprc_enabled = dasd_eckd_pprc_enabled,
6971         .copy_pair_swap = dasd_eckd_copy_pair_swap,
6972         .device_ping = dasd_eckd_device_ping,
6973 };
6974
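/*
 * Module initialization: allocate the static (DMA-capable) emergency
 * request buffers, the pe_handler worker and the rawpadpage, then
 * register the ECKD ccw driver.
 */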
6975 static int __init
6976 dasd_eckd_init(void)
6977 {
6978         int ret;
6979
6980         ASCEBC(dasd_eckd_discipline.ebcname, 4);
6981         dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6982                                    GFP_KERNEL | GFP_DMA);
6983         if (!dasd_reserve_req)
6984                 return -ENOMEM;
6985         dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6986                                     GFP_KERNEL | GFP_DMA);
6987         if (!dasd_vol_info_req) {
6988                 kfree(dasd_reserve_req);
6989                 return -ENOMEM;
6990         }
6991         pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6992                                     GFP_KERNEL | GFP_DMA);
6993         if (!pe_handler_worker) {
6994                 kfree(dasd_reserve_req);
6995                 kfree(dasd_vol_info_req);
6996                 return -ENOMEM;
6997         }
6998         rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6999         if (!rawpadpage) {
7000                 kfree(pe_handler_worker);
7001                 kfree(dasd_reserve_req);
7002                 kfree(dasd_vol_info_req);
7003                 return -ENOMEM;
7004         }
7005         ret = ccw_driver_register(&dasd_eckd_driver);
7006         if (!ret)
7007                 wait_for_device_probe();
7008         else {
7009                 kfree(pe_handler_worker);
7010                 kfree(dasd_reserve_req);
7011                 kfree(dasd_vol_info_req);
7012                 free_page((unsigned long)rawpadpage);
7013         }
7014         return ret;
7015 }
7016
7017 static void __exit
7018 dasd_eckd_cleanup(void)
7019 {
7020         ccw_driver_unregister(&dasd_eckd_driver);
7021         kfree(pe_handler_worker);
7022         kfree(dasd_reserve_req);
             kfree(dasd_vol_info_req);
7023         free_page((unsigned long)rawpadpage);
7024 }
7025
7026 module_init(dasd_eckd_init);
7027 module_exit(dasd_eckd_cleanup);