s390/dasd: Add new ioctl to release space
[linux-block.git] / drivers / s390 / block / dasd_eckd.c
CommitLineData
6a55d2cd 1// SPDX-License-Identifier: GPL-2.0
138c014d 2/*
1da177e4 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
138c014d 4 * Horst Hummel <Horst.Hummel@de.ibm.com>
1da177e4
LT
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
d41dd122 8 * Copyright IBM Corp. 1999, 2009
ab1d848f
NH
9 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
1da177e4
LT
11 */
12
ca99dab0 13#define KMSG_COMPONENT "dasd-eckd"
fc19f381 14
1da177e4
LT
15#include <linux/stddef.h>
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/hdreg.h> /* HDIO_GETGEO */
19#include <linux/bio.h>
20#include <linux/module.h>
048cd4e5 21#include <linux/compat.h>
1da177e4 22#include <linux/init.h>
5a3b7b11 23#include <linux/seq_file.h>
1da177e4 24
382b7366 25#include <asm/css_chars.h>
1da177e4
LT
26#include <asm/debug.h>
27#include <asm/idals.h>
28#include <asm/ebcdic.h>
29#include <asm/io.h>
7c0f6ba6 30#include <linux/uaccess.h>
40545573 31#include <asm/cio.h>
1da177e4 32#include <asm/ccwdev.h>
f3eb5384 33#include <asm/itcw.h>
5db8440c
SH
34#include <asm/schid.h>
35#include <asm/chpid.h>
1da177e4
LT
36
37#include "dasd_int.h"
38#include "dasd_eckd.h"
39
40#ifdef PRINTK_HEADER
41#undef PRINTK_HEADER
42#endif /* PRINTK_HEADER */
43#define PRINTK_HEADER "dasd(eckd):"
44
e4dbb0f2
SH
45/*
46 * raw track access always map to 64k in memory
47 * so it maps to 16 blocks of 4k per track
48 */
49#define DASD_RAW_BLOCK_PER_TRACK 16
50#define DASD_RAW_BLOCKSIZE 4096
51/* 64k are 128 x 512 byte sectors */
52#define DASD_RAW_SECTORS_PER_TRACK 128
53
1da177e4
LT
54MODULE_LICENSE("GPL");
55
56static struct dasd_discipline dasd_eckd_discipline;
57
1da177e4
LT
/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	/* control-unit type/model, device type/model pairs handled by this
	 * discipline; .driver_info is a small per-entry index */
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};
73
74MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
75
76static struct ccw_driver dasd_eckd_driver; /* see below */
77
558b9ef0
SW
78static void *rawpadpage;
79
eb6e199b
SW
80#define INIT_CQR_OK 0
81#define INIT_CQR_UNFORMATTED 1
82#define INIT_CQR_ERROR 2
83
f932bcea
SW
/* emergency request for reserve/release */
/* Statically allocated so a reserve/release can still be built when
 * dynamic allocation is not possible; access is serialized by
 * dasd_reserve_mutex. */
static struct {
	struct dasd_ccw_req cqr;	/* the request itself */
	struct ccw1 ccw;		/* single channel command word */
	char data[32];			/* payload area for the CCW */
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
91
a4d26c6a
SW
/* definitions for the path verification worker */
struct path_verification_work_data {
	struct work_struct worker;	/* workqueue item */
	struct dasd_device *device;	/* device whose paths are verified */
	struct dasd_ccw_req cqr;	/* preallocated request */
	struct ccw1 ccw;		/* single CCW for the request */
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE]; /* read-config-data buffer */
	int isglobal;			/* 1: using the static global instance */
	__u8 tbvpm;			/* path mask of paths to be verified */
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);
eb6e199b 104
5db8440c
SH
/* work item context for handling an attention interrupt on one path */
struct check_attention_work_data {
	struct work_struct worker;	/* workqueue item */
	struct dasd_device *device;	/* device that raised attention */
	__u8 lpum;			/* last-path-used mask of the path */
};
110
c729696b 111static int dasd_eckd_ext_pool_id(struct dasd_device *);
8fd57520
JH
112static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
113 struct dasd_device *, struct dasd_device *,
114 unsigned int, int, unsigned int, unsigned int,
115 unsigned int, unsigned int);
116
1da177e4
LT
117/* initial attempt at a probe function. this can be simplified once
118 * the other detection code is gone */
119static int
120dasd_eckd_probe (struct ccw_device *cdev)
121{
122 int ret;
123
40545573 124 /* set ECKD specific ccw-device options */
454e1fa1
PO
125 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
126 CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
40545573 127 if (ret) {
b8ed5dd5
SH
128 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
129 "dasd_eckd_probe: could not set "
130 "ccw-device options");
1da177e4 131 return ret;
40545573
HH
132 }
133 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
134 return ret;
1da177e4
LT
135}
136
/* Bring the device online via the generic DASD path, bound to the
 * ECKD discipline. */
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
142
1da177e4
LT
143static const int sizes_trk0[] = { 28, 148, 84 };
144#define LABEL_SIZE 140
145
3bc9fef9 146/* head and record addresses of count_area read in analysis ccw */
ce6915f5 147static const int count_area_head[] = { 0, 0, 0, 0, 1 };
3bc9fef9
SH
148static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
149
1da177e4
LT
/* Integer quotient of d1 / d2, rounded towards +infinity. */
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	unsigned int q = d1 / d2;

	/* round up when there is a remainder */
	if (d1 % d2)
		q++;
	return q;
}
155
4d284cac 156static unsigned int
1da177e4
LT
157recs_per_track(struct dasd_eckd_characteristics * rdc,
158 unsigned int kl, unsigned int dl)
159{
160 int dn, kn;
161
162 switch (rdc->dev_type) {
163 case 0x3380:
164 if (kl)
165 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
166 ceil_quot(dl + 12, 32));
167 else
168 return 1499 / (15 + ceil_quot(dl + 12, 32));
169 case 0x3390:
170 dn = ceil_quot(dl + 6, 232) + 1;
171 if (kl) {
172 kn = ceil_quot(kl + 6, 232) + 1;
173 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
174 9 + ceil_quot(dl + 6 * dn, 34));
175 } else
176 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
177 case 0x9345:
178 dn = ceil_quot(dl + 6, 232) + 1;
179 if (kl) {
180 kn = ceil_quot(kl + 6, 232) + 1;
181 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
182 ceil_quot(dl + 6 * dn, 34));
183 } else
184 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
185 }
186 return 0;
187}
188
b44b0ab3
SW
189static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
190{
191 geo->cyl = (__u16) cyl;
192 geo->head = cyl >> 16;
193 geo->head <<= 4;
194 geo->head |= head;
195}
196
/*
 * Put the current physical clock value into the define extent data
 * as System Time Stamp and mark it valid (needed for XRC support).
 * Returns 0 when a clock error may safely be ignored, otherwise the
 * return code of get_phys_clock().
 */
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	/* ccw may be NULL when called for a prefix payload only */
	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}
1da177e4 223
/*
 * Build a Define Extent payload in @data for tracks @trk..@totrk and
 * channel command @cmd; if @ccw is non-NULL it is also filled as a
 * Define Extent CCW pointing at @data.
 * Returns 0, or the error from set_timestamp() for write commands.
 */
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	/* ccw is NULL when the payload is embedded in a prefix command */
	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	/* set permission mask and cache attributes per command class */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		/* destructive commands additionally need the auth bit */
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	/* enable Regular Data Format Mode on newer control units, except
	 * for the CDL-formatted first two tracks */
	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	/* convert linear track numbers to cylinder/head addresses */
	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
330
8e09f215 331
45f186be
JH
/*
 * Build a Locate Record Extended payload in @data for @count
 * records/tracks starting at record @rec_on_trk on track @trk; if @ccw
 * is non-NULL it is also filled as a Locate Record Extended CCW.
 * BUGs on unknown @cmd opcodes.
 */
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	/* ccw is NULL when the payload is embedded in a prefix command */
	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		/* full-track write carries 2 extra bytes of extended
		 * parameter data */
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		/* estimate the rotational sector position of the record
		 * from the device-type specific track layout formula */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is transferred in addition */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		/* build a bitmask of the records to be written; one bit
		 * per record, split across the two parameter bytes */
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			    "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	/* seek and search arguments address the first record */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
473
/*
 * Build a Prefix CCW in @ccw with payload @pfxdata, combining a Define
 * Extent and (for @format == 1) a Locate Record Extended for tracks
 * @trk..@totrk on @basedev, started on @startdev (may be a PAV alias).
 * Returns 0 or the error of define_extent(); BUGs on @format > 1.
 */
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	/* full-track write needs 2 extra bytes of extended parameters */
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
540
f3eb5384
SW
/* Convenience wrapper: build a format-0 prefix (Define Extent only,
 * no embedded Locate Record Extended). */
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
548
/*
 * Build a (non-extended) Locate Record CCW in @ccw with payload @data
 * for @no_rec records starting at record @rec_on_trk on track @trk.
 * Unknown opcodes are only logged; the payload then keeps default
 * operation bits.
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/* estimate the rotational sector position of the record
		 * from the device-type specific track layout formula */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	/* set orientation/operation bits per channel command */
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is transferred in addition */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	/* seek and search arguments address the first record */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
650
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *	ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	/* the first three records and every record on the second
	 * track are the special CDL blocks */
	return recid < 3 ||
	       (recid >= blk_per_trk && recid < 2 * blk_per_trk);
}
672
673/*
674 * Returns the record size for the special blocks of the cdl format.
675 * Only returns something useful if dasd_eckd_cdl_special is true
676 * for the recid.
677 */
678static inline int
679dasd_eckd_cdl_reclen(int recid)
680{
681 if (recid < 3)
682 return sizes_trk0[recid];
683 return LABEL_SIZE;
684}
b206181d
SH
/* create unique id from private structure. */
/* Caller must hold the ccwdev lock; reads the NED/GNEQ/SNEQ/VDSNEQ
 * pointers previously set up by dasd_eckd_identify_conf_parts(). */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	/* vendor and serial come from the NED, converted from EBCDIC */
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	/* the SNEQ, if present, tells whether this is a PAV alias */
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	/* append the virtual-device unique identifier token as hex */
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
715
716/*
717 * Generate device unique id that specifies the physical device.
718 */
719static int dasd_eckd_generate_uid(struct dasd_device *device)
720{
543691a4 721 struct dasd_eckd_private *private = device->private;
b206181d
SH
722 unsigned long flags;
723
b206181d
SH
724 if (!private)
725 return -ENODEV;
726 if (!private->ned || !private->gneq)
727 return -ENODEV;
728 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
729 create_uid(private);
2dedf0d9 730 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3d052595
HH
731 return 0;
732}
733
2dedf0d9
SH
734static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
735{
543691a4 736 struct dasd_eckd_private *private = device->private;
2dedf0d9
SH
737 unsigned long flags;
738
543691a4 739 if (private) {
2dedf0d9
SH
740 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
741 *uid = private->uid;
742 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
743 return 0;
744 }
745 return -EINVAL;
746}
747
b206181d
SH
/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	/* build the uid from @private's conf data, then compare it with
	 * the device's established uid */
	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}
762
a4d26c6a
SW
/* Initialize @cqr as a single-CCW Read Configuration Data request
 * reading into @rcd_buffer on the path(s) given by @lpm. */
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* mark as path verification request */
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
795
5915a873
SH
796/*
797 * Wakeup helper for read_conf
798 * if the cqr is not done and needs some error recovery
799 * the buffer has to be re-initialized with the EBCDIC "V1.0"
800 * to show support for virtual device SNEQ
801 */
802static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
803{
804 struct ccw1 *ccw;
805 __u8 *rcd_buffer;
806
807 if (cqr->status != DASD_CQR_DONE) {
808 ccw = cqr->cpaddr;
809 rcd_buffer = (__u8 *)((addr_t) ccw->cda);
810 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
811
812 rcd_buffer[0] = 0xE5;
813 rcd_buffer[1] = 0xF1;
814 rcd_buffer[2] = 0x4B;
815 rcd_buffer[3] = 0xF0;
816 }
817 dasd_wakeup_cb(cqr, data);
818}
819
a4d26c6a
SW
/* Issue a Read Configuration Data request on path @lpm synchronously,
 * bypassing the normal request queue (used during path verification).
 * Returns 0 on success, -EOPNOTSUPP if the device lacks RCD support,
 * or the error from dasd_sleep_on_immediatly(). */
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	/* no ERP, but allow stealing the device lock; few retries since
	 * this runs in the path verification fast path */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
843
/* Read the configuration data on path @lpm into a freshly allocated
 * DMA buffer. On success *rcd_buffer/*rcd_buffer_size describe the
 * buffer (ownership passes to the caller, who must kfree it); on error
 * both are reset and a negative errno is returned. */
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data ara */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
895
4abb08c2
SW
/* Scan the raw configuration data in private->conf_data and set the
 * NED/SNEQ/VDSNEQ/GNEQ pointers to the records found. Returns 0 on
 * success, -EINVAL (with all pointers reset) when the mandatory NED or
 * GNEQ record is missing. */
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{

	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	/* walk the fixed-size records and classify each by its
	 * identifier/format fields */
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	/* NED and GNEQ are mandatory; without them the data is unusable */
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;

};
929
930static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
931{
932 struct dasd_gneq *gneq;
933 int i, count, found;
934
935 count = conf_len / sizeof(*gneq);
936 gneq = (struct dasd_gneq *)conf_data;
937 found = 0;
938 for (i = 0; i < count; ++i) {
939 if (gneq->flags.identifier == 2) {
940 found = 1;
941 break;
942 }
943 gneq++;
944 }
945 if (found)
946 return ((char *)gneq)[18] & 0x07;
947 else
948 return 0;
949}
950
c7c0c9de
SH
/* Drop all stored configuration data: the device-wide copy and the
 * per-path buffers (freed) and per-path channel identifiers. */
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	/* the device-wide conf_data aliases one of the per-path buffers,
	 * which are freed below — so only the pointer is cleared here */
	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
}
966
967
4abb08c2 968static int dasd_eckd_read_conf(struct dasd_device *device)
1da177e4
LT
969{
970 void *conf_data;
971 int conf_len, conf_data_saved;
b179b037 972 int rc, path_err, pos;
a4d26c6a 973 __u8 lpm, opm;
b206181d 974 struct dasd_eckd_private *private, path_private;
b206181d
SH
975 struct dasd_uid *uid;
976 char print_path_uid[60], print_device_uid[60];
ded27d8d 977 struct channel_path_desc_fmt0 *chp_desc;
a521b048 978 struct subchannel_id sch_id;
1da177e4 979
543691a4 980 private = device->private;
a4d26c6a 981 opm = ccw_device_get_path_mask(device->cdev);
a521b048 982 ccw_device_get_schid(device->cdev, &sch_id);
1da177e4 983 conf_data_saved = 0;
55d3a85c 984 path_err = 0;
1da177e4
LT
985 /* get configuration data per operational path */
986 for (lpm = 0x80; lpm; lpm>>= 1) {
b206181d
SH
987 if (!(lpm & opm))
988 continue;
989 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
990 &conf_len, lpm);
991 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
992 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
993 "Read configuration data returned "
994 "error %d", rc);
995 return rc;
996 }
997 if (conf_data == NULL) {
998 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
999 "No configuration data "
1000 "retrieved");
1001 /* no further analysis possible */
c9346151 1002 dasd_path_add_opm(device, opm);
b206181d
SH
1003 continue; /* no error */
1004 }
1005 /* save first valid configuration data */
1006 if (!conf_data_saved) {
c7c0c9de
SH
1007 /* initially clear previously stored conf_data */
1008 dasd_eckd_clear_conf_data(device);
b206181d
SH
1009 private->conf_data = conf_data;
1010 private->conf_len = conf_len;
1011 if (dasd_eckd_identify_conf_parts(private)) {
1012 private->conf_data = NULL;
1013 private->conf_len = 0;
1014 kfree(conf_data);
1015 continue;
1da177e4 1016 }
c7c0c9de
SH
1017 pos = pathmask_to_pos(lpm);
1018 /* store per path conf_data */
c9346151 1019 device->path[pos].conf_data = conf_data;
a521b048
SH
1020 device->path[pos].cssid = sch_id.cssid;
1021 device->path[pos].ssid = sch_id.ssid;
1022 chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
1023 if (chp_desc)
1024 device->path[pos].chpid = chp_desc->chpid;
1025 kfree(chp_desc);
b206181d
SH
1026 /*
1027 * build device UID that other path data
1028 * can be compared to it
1029 */
1030 dasd_eckd_generate_uid(device);
1031 conf_data_saved++;
1032 } else {
1033 path_private.conf_data = conf_data;
1034 path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1035 if (dasd_eckd_identify_conf_parts(
1036 &path_private)) {
1037 path_private.conf_data = NULL;
1038 path_private.conf_len = 0;
1039 kfree(conf_data);
1040 continue;
1da177e4 1041 }
b206181d
SH
1042 if (dasd_eckd_compare_path_uid(
1043 device, &path_private)) {
1044 uid = &path_private.uid;
1045 if (strlen(uid->vduit) > 0)
1046 snprintf(print_path_uid,
1047 sizeof(print_path_uid),
1048 "%s.%s.%04x.%02x.%s",
1049 uid->vendor, uid->serial,
1050 uid->ssid, uid->real_unit_addr,
1051 uid->vduit);
1052 else
1053 snprintf(print_path_uid,
1054 sizeof(print_path_uid),
1055 "%s.%s.%04x.%02x",
1056 uid->vendor, uid->serial,
1057 uid->ssid,
1058 uid->real_unit_addr);
1059 uid = &private->uid;
1060 if (strlen(uid->vduit) > 0)
1061 snprintf(print_device_uid,
1062 sizeof(print_device_uid),
1063 "%s.%s.%04x.%02x.%s",
1064 uid->vendor, uid->serial,
1065 uid->ssid, uid->real_unit_addr,
1066 uid->vduit);
1067 else
1068 snprintf(print_device_uid,
1069 sizeof(print_device_uid),
1070 "%s.%s.%04x.%02x",
1071 uid->vendor, uid->serial,
1072 uid->ssid,
1073 uid->real_unit_addr);
1074 dev_err(&device->cdev->dev,
1075 "Not all channel paths lead to "
1076 "the same device, path %02X leads to "
1077 "device %s instead of %s\n", lpm,
1078 print_path_uid, print_device_uid);
55d3a85c 1079 path_err = -EINVAL;
c9346151 1080 dasd_path_add_cablepm(device, lpm);
55d3a85c 1081 continue;
1da177e4 1082 }
c7c0c9de
SH
1083 pos = pathmask_to_pos(lpm);
1084 /* store per path conf_data */
c9346151 1085 device->path[pos].conf_data = conf_data;
a521b048
SH
1086 device->path[pos].cssid = sch_id.cssid;
1087 device->path[pos].ssid = sch_id.ssid;
1088 chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
1089 if (chp_desc)
1090 device->path[pos].chpid = chp_desc->chpid;
1091 kfree(chp_desc);
b206181d
SH
1092 path_private.conf_data = NULL;
1093 path_private.conf_len = 0;
1094 }
1095 switch (dasd_eckd_path_access(conf_data, conf_len)) {
1096 case 0x02:
c9346151 1097 dasd_path_add_nppm(device, lpm);
b206181d
SH
1098 break;
1099 case 0x03:
c9346151 1100 dasd_path_add_ppm(device, lpm);
b206181d 1101 break;
1da177e4 1102 }
c9346151
SH
1103 if (!dasd_path_get_opm(device)) {
1104 dasd_path_set_opm(device, lpm);
ccc0e7dc
SH
1105 dasd_generic_path_operational(device);
1106 } else {
c9346151 1107 dasd_path_add_opm(device, lpm);
ccc0e7dc 1108 }
1da177e4 1109 }
b206181d 1110
55d3a85c 1111 return path_err;
1da177e4
LT
1112}
1113
a521b048
SH
1114static u32 get_fcx_max_data(struct dasd_device *device)
1115{
1116 struct dasd_eckd_private *private = device->private;
1117 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1118 int tpm, mdc;
1119
1120 if (dasd_nofcx)
1121 return 0;
1122 /* is transport mode supported? */
1123 fcx_in_css = css_general_characteristics.fcx;
1124 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1125 fcx_in_features = private->features.feature[40] & 0x80;
1126 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1127
1128 if (!tpm)
1129 return 0;
1130
1131 mdc = ccw_device_get_mdc(device->cdev, 0);
1132 if (mdc < 0) {
1133 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1134 return 0;
1135 } else {
1136 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1137 }
1138}
1139
a4d26c6a
SW
1140static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1141{
543691a4 1142 struct dasd_eckd_private *private = device->private;
a4d26c6a
SW
1143 int mdc;
1144 u32 fcx_max_data;
1145
a4d26c6a
SW
1146 if (private->fcx_max_data) {
1147 mdc = ccw_device_get_mdc(device->cdev, lpm);
1148 if ((mdc < 0)) {
1149 dev_warn(&device->cdev->dev,
1150 "Detecting the maximum data size for zHPF "
1151 "requests failed (rc=%d) for a new path %x\n",
1152 mdc, lpm);
1153 return mdc;
1154 }
0f02c4e7 1155 fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
a4d26c6a
SW
1156 if (fcx_max_data < private->fcx_max_data) {
1157 dev_warn(&device->cdev->dev,
1158 "The maximum data size for zHPF requests %u "
1159 "on a new path %x is below the active maximum "
1160 "%u\n", fcx_max_data, lpm,
1161 private->fcx_max_data);
1162 return -EACCES;
1163 }
1164 }
1165 return 0;
1166}
1167
b206181d
SH
/*
 * Re-read configuration data over the first working operational path and
 * rebuild the device UID from it.  Used during path verification when a
 * path's UID does not match the stored device UID (e.g. after a z/VM
 * hyperswap may have changed the device).
 *
 * Returns 0 on success, -ENODEV if no path delivered identifiable data,
 * or the error reported while reading the configuration data.
 */
static int rebuild_device_uid(struct dasd_device *device,
			      struct path_verification_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		/*
		 * overwrite the stored conf_data in place; assumes the
		 * existing buffer is at least DASD_ECKD_RCD_DATA_SIZE
		 * bytes -- NOTE(review): confirm against allocation in
		 * dasd_eckd_read_conf_lpm
		 */
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
1206
a4d26c6a
SW
/*
 * Worker function for path verification.
 *
 * For every path in data->tbvpm (to-be-verified path mask) the
 * configuration data is read and classified; paths are sorted into
 * operational (opm), non-preferred (npm), preferred (ppm), to-be-retried
 * (epm), miscabled (cablepm) and no-zHPF (hpfpm) masks, the path UID is
 * compared against the device UID, and the device path masks are updated
 * under the ccw device lock.
 */
static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	unsigned long flags;
	char print_uid[60];
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		/* reuse the work data's buffer and request per path */
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			/* path works but gives no conf data: still usable */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		/* path must support the active zHPF maximum data size */
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}
1375
1376static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
1377{
1378 struct path_verification_work_data *data;
1379
1380 data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1381 if (!data) {
1382 if (mutex_trylock(&dasd_path_verification_mutex)) {
1383 data = path_verification_worker;
1384 data->isglobal = 1;
1385 } else
1386 return -ENOMEM;
1387 } else {
1388 memset(data, 0, sizeof(*data));
1389 data->isglobal = 0;
1390 }
1391 INIT_WORK(&data->worker, do_path_verification_work);
1392 dasd_get_device(device);
1393 data->device = device;
1394 data->tbvpm = lpm;
1395 schedule_work(&data->worker);
1396 return 0;
1397}
1398
a521b048
SH
1399static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1400{
1401 struct dasd_eckd_private *private = device->private;
1402 unsigned long flags;
1403
1404 if (!private->fcx_max_data)
1405 private->fcx_max_data = get_fcx_max_data(device);
1406 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1407 dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1408 dasd_schedule_device_bh(device);
1409 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1410}
1411
8e09f215
SW
/*
 * Read the device feature codes via a PSF/RSSD channel program
 * (Read Subsystem Data, suborder "Read Feature Codes") and store them in
 * the private data.  On failure the feature data stays zeroed and a
 * warning is issued; the error is returned but callers may ignore it.
 */
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* clear stale data so a failed read leaves defined contents */
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD read below */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
1473
c729696b
JH
/*
 * Read Volume Information - Volume Storage Query.
 *
 * Issues a PSF/RSSD channel program with the VSQ suborder and copies the
 * result into private->vsq (ESE flag, extent pool id, space counters).
 * Returns 0 on success (or on alias devices, where the command is not
 * valid), otherwise the error from executing the request.
 */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/*
	 * Prepare for Read Subsystem Data; prssdp is not memset here --
	 * presumably dasd_smalloc_request returns zeroed data, TODO confirm
	 */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD read below */
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		dev_warn(&device->cdev->dev,
			 "Reading the volume storage information failed with rc=%d\n", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1542
1543static int dasd_eckd_is_ese(struct dasd_device *device)
1544{
1545 struct dasd_eckd_private *private = device->private;
1546
1547 return private->vsq.vol_info.ese;
1548}
1549
1550static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1551{
1552 struct dasd_eckd_private *private = device->private;
1553
1554 return private->vsq.extent_pool_id;
1555}
1556
1557/*
1558 * This value represents the total amount of available space. As more space is
1559 * allocated by ESE volumes, this value will decrease.
1560 * The data for this value is therefore updated on any call.
1561 */
1562static int dasd_eckd_space_configured(struct dasd_device *device)
1563{
1564 struct dasd_eckd_private *private = device->private;
1565 int rc;
1566
1567 rc = dasd_eckd_read_vol_info(device);
1568
1569 return rc ? : private->vsq.space_configured;
1570}
1571
1572/*
1573 * The value of space allocated by an ESE volume may have changed and is
1574 * therefore updated on any call.
1575 */
1576static int dasd_eckd_space_allocated(struct dasd_device *device)
1577{
1578 struct dasd_eckd_private *private = device->private;
1579 int rc;
1580
1581 rc = dasd_eckd_read_vol_info(device);
1582
1583 return rc ? : private->vsq.space_allocated;
1584}
1585
1586static int dasd_eckd_logical_capacity(struct dasd_device *device)
1587{
1588 struct dasd_eckd_private *private = device->private;
1589
1590 return private->vsq.logical_capacity;
1591}
1592
1593static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1594 struct dasd_rssd_lcq *lcq)
1595{
1596 struct dasd_eckd_private *private = device->private;
1597 int pool_id = dasd_eckd_ext_pool_id(device);
1598 struct dasd_ext_pool_sum eps;
1599 int i;
1600
1601 for (i = 0; i < lcq->pool_count; i++) {
1602 eps = lcq->ext_pool_sum[i];
1603 if (eps.pool_id == pool_id) {
1604 memcpy(&private->eps, &eps,
1605 sizeof(struct dasd_ext_pool_sum));
1606 }
1607 }
1608}
1609
/*
 * Read Extent Pool Information - Logical Configuration Query.
 *
 * Issues a PSF/RSSD channel program with the LCQ suborder and caches the
 * extent pool summary for this device's pool via
 * dasd_eckd_cpy_ext_pool_data().  Returns 0 on success (or on alias
 * devices, where the command is not valid), otherwise the request error.
 */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD read below */
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Logical Configuration Query */
	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		dev_warn(&device->cdev->dev,
			 "Reading the logical configuration failed with rc=%d\n", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1676
1677/*
1678 * Depending on the device type, the extent size is specified either as
1679 * cylinders per extent (CKD) or size per extent (FBA)
1680 * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
1681 */
1682static int dasd_eckd_ext_size(struct dasd_device *device)
1683{
1684 struct dasd_eckd_private *private = device->private;
1685 struct dasd_ext_pool_sum eps = private->eps;
1686
1687 if (!eps.flags.extent_size_valid)
1688 return 0;
1689 if (eps.extent_size.size_1G)
1690 return 1113;
1691 if (eps.extent_size.size_16M)
1692 return 21;
1693
1694 return 0;
1695}
1696
1697static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1698{
1699 struct dasd_eckd_private *private = device->private;
1700
1701 return private->eps.warn_thrshld;
1702}
1703
1704static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1705{
1706 struct dasd_eckd_private *private = device->private;
1707
1708 return private->eps.flags.capacity_at_warnlevel;
1709}
1710
1711/*
1712 * Extent Pool out of space
1713 */
1714static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1715{
1716 struct dasd_eckd_private *private = device->private;
1717
1718 return private->eps.flags.pool_oos;
1719}
8e09f215 1720
40545573
HH
/*
 * Build CP for Perform Subsystem Function - SSC.
 *
 * Allocates and fills a single-CCW request carrying a PSF order with the
 * SSC (Set Subsystem Characteristics) suborder; @enable_pav selects
 * whether the PAV enable bit is set.  Returns the prepared request or an
 * ERR_PTR on allocation failure; the caller owns and frees the request.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;	/* request PAV enablement */
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	/* 66 bytes is the defined PSF-SSC parameter length -- NOTE(review):
	 * confirm against the storage server interface specification */
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
1761
1762/*
1763 * Perform Subsystem Function.
1764 * It is necessary to trigger CIO for channel revalidation since this
1765 * call might change behaviour of DASD devices.
1766 */
1767static int
12d7b107
SH
1768dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1769 unsigned long flags)
40545573 1770{
8e09f215
SW
1771 struct dasd_ccw_req *cqr;
1772 int rc;
1773
f3eb5384 1774 cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
8e09f215
SW
1775 if (IS_ERR(cqr))
1776 return PTR_ERR(cqr);
1777
12d7b107
SH
1778 /*
1779 * set flags e.g. turn on failfast, to prevent blocking
1780 * the calling function should handle failed requests
1781 */
1782 cqr->flags |= flags;
1783
8e09f215
SW
1784 rc = dasd_sleep_on(cqr);
1785 if (!rc)
1786 /* trigger CIO to reprobe devices */
1787 css_schedule_reprobe();
12d7b107
SH
1788 else if (cqr->intrc == -EAGAIN)
1789 rc = -EAGAIN;
1790
8e09f215
SW
1791 dasd_sfree_request(cqr, cqr->memdev);
1792 return rc;
40545573
HH
1793}
1794
1795/*
1796 * Valide storage server of current device.
1797 */
12d7b107
SH
1798static int dasd_eckd_validate_server(struct dasd_device *device,
1799 unsigned long flags)
40545573 1800{
543691a4
SO
1801 struct dasd_eckd_private *private = device->private;
1802 int enable_pav, rc;
40545573 1803
f9f8d02f
SH
1804 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1805 private->uid.type == UA_HYPER_PAV_ALIAS)
12d7b107 1806 return 0;
40545573 1807 if (dasd_nopav || MACHINE_IS_VM)
f3eb5384
SW
1808 enable_pav = 0;
1809 else
1810 enable_pav = 1;
12d7b107 1811 rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
eb6e199b 1812
8e79a441
HH
1813 /* may be requested feature is not available on server,
1814 * therefore just report error and go ahead */
b8ed5dd5
SH
1815 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1816 "returned rc=%d", private->uid.ssid, rc);
12d7b107 1817 return rc;
40545573
HH
1818}
1819
f1633031
SH
1820/*
1821 * worker to do a validate server in case of a lost pathgroup
1822 */
1823static void dasd_eckd_do_validate_server(struct work_struct *work)
1824{
1825 struct dasd_device *device = container_of(work, struct dasd_device,
1826 kick_validate);
ea4da6ea
SH
1827 unsigned long flags = 0;
1828
1829 set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1830 if (dasd_eckd_validate_server(device, flags)
12d7b107
SH
1831 == -EAGAIN) {
1832 /* schedule worker again if failed */
1833 schedule_work(&device->kick_validate);
1834 return;
1835 }
1836
f1633031
SH
1837 dasd_put_device(device);
1838}
1839
1840static void dasd_eckd_kick_validate_server(struct dasd_device *device)
1841{
1842 dasd_get_device(device);
25e2cf1c
SH
1843 /* exit if device not online or in offline processing */
1844 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1845 device->state < DASD_STATE_ONLINE) {
1846 dasd_put_device(device);
1847 return;
1848 }
f1633031 1849 /* queue call to do_validate_server to the kernel event daemon. */
f2608cd4
SH
1850 if (!schedule_work(&device->kick_validate))
1851 dasd_put_device(device);
f1633031
SH
1852}
1853
3d052595
HH
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 *
 * Full online-initialization sequence: allocate private data, read
 * configuration data, register with the alias/LCU handling, validate the
 * server, read feature codes, volume and extent pool information, and
 * finally the device characteristics.  On error, teardown happens in
 * reverse order via the out_err labels.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	/* allocate (or reset) the discipline-private data */
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	/* the GNEQ may carry a device-specific timeout: number * 10^value */
	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	/* only base devices get a block device layer attached */
	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	/* result deliberately ignored: PAV may be unsupported by server */
	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Volume Information */
	rc = dasd_eckd_read_vol_info(device);
	if (rc)
		goto out_err3;

	/* Read Extent Pool Information */
	rc = dasd_eckd_read_ext_pool_info(device);
	if (rc)
		goto out_err3;

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	/* raw-track access requires Read Track support in Locate Record */
	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
2013
8e09f215
SW
2014static void dasd_eckd_uncheck_device(struct dasd_device *device)
2015{
543691a4 2016 struct dasd_eckd_private *private = device->private;
b179b037 2017 int i;
4abb08c2 2018
7c6553d4
SH
2019 if (!private)
2020 return;
2021
8e09f215 2022 dasd_alias_disconnect_device_from_lcu(device);
4abb08c2
SW
2023 private->ned = NULL;
2024 private->sneq = NULL;
2025 private->vdsneq = NULL;
2026 private->gneq = NULL;
2027 private->conf_len = 0;
b179b037 2028 for (i = 0; i < 8; i++) {
c9346151
SH
2029 kfree(device->path[i].conf_data);
2030 if ((__u8 *)device->path[i].conf_data ==
b179b037
SH
2031 private->conf_data) {
2032 private->conf_data = NULL;
2033 private->conf_len = 0;
2034 }
c9346151 2035 device->path[i].conf_data = NULL;
a521b048
SH
2036 device->path[i].cssid = 0;
2037 device->path[i].ssid = 0;
2038 device->path[i].chpid = 0;
b179b037 2039 }
4abb08c2
SW
2040 kfree(private->conf_data);
2041 private->conf_data = NULL;
8e09f215
SW
2042}
2043
1da177e4
LT
/*
 * Build the channel program for the initial volume analysis: read the
 * count fields of the first records on tracks 0 and 1 into
 * private->count_area so that dasd_eckd_end_analysis() can later deduce
 * the disk layout (CDL vs. LDL) and the block size.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	/* 8 CCWs: DE + LO + 4x read count (track 0) + LO + read count (track 1) */
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 2 tracks. */
	define_extent(ccw++, cqr->data, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	/* One read-count CCW per record; results land in count_area[0..3]. */
	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 1. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 1, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw; stores into count_area[4]. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
2101
eb6e199b
SW
2102/* differentiate between 'no record found' and any other error */
2103static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2104{
2105 char *sense;
2106 if (init_cqr->status == DASD_CQR_DONE)
2107 return INIT_CQR_OK;
2108 else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2109 init_cqr->status == DASD_CQR_FAILED) {
2110 sense = dasd_get_sense(&init_cqr->irb);
2111 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2112 return INIT_CQR_UNFORMATTED;
2113 else
2114 return INIT_CQR_ERROR;
2115 } else
2116 return INIT_CQR_ERROR;
2117}
2118
1da177e4
LT
2119/*
2120 * This is the callback function for the init_analysis cqr. It saves
2121 * the status of the initial analysis ccw before it frees it and kicks
2122 * the device to continue the startup sequence. This will call
2123 * dasd_eckd_do_analysis again (if the devices has not been marked
2124 * for deletion in the meantime).
2125 */
eb6e199b
SW
2126static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2127 void *data)
1da177e4 2128{
543691a4
SO
2129 struct dasd_device *device = init_cqr->startdev;
2130 struct dasd_eckd_private *private = device->private;
1da177e4 2131
eb6e199b 2132 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1da177e4
LT
2133 dasd_sfree_request(init_cqr, device);
2134 dasd_kick_device(device);
2135}
2136
eb6e199b 2137static int dasd_eckd_start_analysis(struct dasd_block *block)
1da177e4 2138{
1da177e4
LT
2139 struct dasd_ccw_req *init_cqr;
2140
8e09f215 2141 init_cqr = dasd_eckd_analysis_ccw(block->base);
1da177e4
LT
2142 if (IS_ERR(init_cqr))
2143 return PTR_ERR(init_cqr);
2144 init_cqr->callback = dasd_eckd_analysis_callback;
2145 init_cqr->callback_data = NULL;
2146 init_cqr->expires = 5*HZ;
eb6e199b
SW
2147 /* first try without ERP, so we can later handle unformatted
2148 * devices as special case
2149 */
2150 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2151 init_cqr->retries = 0;
1da177e4
LT
2152 dasd_add_request_head(init_cqr);
2153 return -EAGAIN;
2154}
2155
eb6e199b 2156static int dasd_eckd_end_analysis(struct dasd_block *block)
1da177e4 2157{
543691a4
SO
2158 struct dasd_device *device = block->base;
2159 struct dasd_eckd_private *private = device->private;
1da177e4
LT
2160 struct eckd_count *count_area;
2161 unsigned int sb, blk_per_trk;
2162 int status, i;
eb6e199b 2163 struct dasd_ccw_req *init_cqr;
1da177e4 2164
1da177e4
LT
2165 status = private->init_cqr_status;
2166 private->init_cqr_status = -1;
eb6e199b
SW
2167 if (status == INIT_CQR_ERROR) {
2168 /* try again, this time with full ERP */
2169 init_cqr = dasd_eckd_analysis_ccw(device);
2170 dasd_sleep_on(init_cqr);
2171 status = dasd_eckd_analysis_evaluation(init_cqr);
2172 dasd_sfree_request(init_cqr, device);
2173 }
2174
e4dbb0f2
SH
2175 if (device->features & DASD_FEATURE_USERAW) {
2176 block->bp_block = DASD_RAW_BLOCKSIZE;
2177 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2178 block->s2b_shift = 3;
2179 goto raw;
2180 }
2181
eb6e199b
SW
2182 if (status == INIT_CQR_UNFORMATTED) {
2183 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1da177e4 2184 return -EMEDIUMTYPE;
eb6e199b
SW
2185 } else if (status == INIT_CQR_ERROR) {
2186 dev_err(&device->cdev->dev,
2187 "Detecting the DASD disk layout failed because "
2188 "of an I/O error\n");
2189 return -EIO;
1da177e4
LT
2190 }
2191
2192 private->uses_cdl = 1;
1da177e4
LT
2193 /* Check Track 0 for Compatible Disk Layout */
2194 count_area = NULL;
2195 for (i = 0; i < 3; i++) {
2196 if (private->count_area[i].kl != 4 ||
3bc9fef9
SH
2197 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2198 private->count_area[i].cyl != 0 ||
2199 private->count_area[i].head != count_area_head[i] ||
2200 private->count_area[i].record != count_area_rec[i]) {
1da177e4
LT
2201 private->uses_cdl = 0;
2202 break;
2203 }
2204 }
2205 if (i == 3)
ce6915f5 2206 count_area = &private->count_area[3];
1da177e4
LT
2207
2208 if (private->uses_cdl == 0) {
2209 for (i = 0; i < 5; i++) {
2210 if ((private->count_area[i].kl != 0) ||
2211 (private->count_area[i].dl !=
3bc9fef9
SH
2212 private->count_area[0].dl) ||
2213 private->count_area[i].cyl != 0 ||
2214 private->count_area[i].head != count_area_head[i] ||
2215 private->count_area[i].record != count_area_rec[i])
1da177e4
LT
2216 break;
2217 }
2218 if (i == 5)
2219 count_area = &private->count_area[0];
2220 } else {
2221 if (private->count_area[3].record == 1)
fc19f381
SH
2222 dev_warn(&device->cdev->dev,
2223 "Track 0 has no records following the VTOC\n");
1da177e4 2224 }
e4dbb0f2 2225
1da177e4
LT
2226 if (count_area != NULL && count_area->kl == 0) {
2227 /* we found notthing violating our disk layout */
2228 if (dasd_check_blocksize(count_area->dl) == 0)
8e09f215 2229 block->bp_block = count_area->dl;
1da177e4 2230 }
8e09f215 2231 if (block->bp_block == 0) {
fc19f381
SH
2232 dev_warn(&device->cdev->dev,
2233 "The disk layout of the DASD is not supported\n");
1da177e4
LT
2234 return -EMEDIUMTYPE;
2235 }
8e09f215
SW
2236 block->s2b_shift = 0; /* bits to shift 512 to get a block */
2237 for (sb = 512; sb < block->bp_block; sb = sb << 1)
2238 block->s2b_shift++;
1da177e4 2239
8e09f215 2240 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
e4dbb0f2
SH
2241
2242raw:
2cc9637c 2243 block->blocks = ((unsigned long) private->real_cyl *
1da177e4
LT
2244 private->rdc_data.trk_per_cyl *
2245 blk_per_trk);
2246
fc19f381 2247 dev_info(&device->cdev->dev,
2cc9637c 2248 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
fc19f381 2249 "%s\n", (block->bp_block >> 10),
2cc9637c 2250 (((unsigned long) private->real_cyl *
fc19f381
SH
2251 private->rdc_data.trk_per_cyl *
2252 blk_per_trk * (block->bp_block >> 9)) >> 1),
2253 ((blk_per_trk * block->bp_block) >> 10),
2254 private->uses_cdl ?
2255 "compatible disk layout" : "linux disk layout");
1da177e4
LT
2256
2257 return 0;
2258}
2259
8e09f215 2260static int dasd_eckd_do_analysis(struct dasd_block *block)
1da177e4 2261{
543691a4 2262 struct dasd_eckd_private *private = block->base->private;
1da177e4 2263
1da177e4 2264 if (private->init_cqr_status < 0)
8e09f215 2265 return dasd_eckd_start_analysis(block);
1da177e4 2266 else
8e09f215 2267 return dasd_eckd_end_analysis(block);
1da177e4
LT
2268}
2269
d42e1712 2270static int dasd_eckd_basic_to_ready(struct dasd_device *device)
8e09f215
SW
2271{
2272 return dasd_alias_add_device(device);
2273};
2274
2275static int dasd_eckd_online_to_ready(struct dasd_device *device)
2276{
669f3765
SH
2277 if (cancel_work_sync(&device->reload_device))
2278 dasd_put_device(device);
2279 if (cancel_work_sync(&device->kick_validate))
2280 dasd_put_device(device);
2281
d42e1712
SH
2282 return 0;
2283};
2284
daa991bf 2285static int dasd_eckd_basic_to_known(struct dasd_device *device)
d42e1712 2286{
8e09f215
SW
2287 return dasd_alias_remove_device(device);
2288};
2289
1da177e4 2290static int
8e09f215 2291dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
1da177e4 2292{
543691a4 2293 struct dasd_eckd_private *private = block->base->private;
1da177e4 2294
8e09f215 2295 if (dasd_check_blocksize(block->bp_block) == 0) {
1da177e4 2296 geo->sectors = recs_per_track(&private->rdc_data,
8e09f215 2297 0, block->bp_block);
1da177e4
LT
2298 }
2299 geo->cylinders = private->rdc_data.no_cyl;
2300 geo->heads = private->rdc_data.trk_per_cyl;
2301 return 0;
2302}
2303
/*
 * Build the TCW request for the format check (transport mode): one
 * READ COUNT MT data transfer per expected record, storing the returned
 * count fields in fmt_buffer for later comparison by the caller.
 * 'rpt' is the expected number of records per track.
 */
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
			  int enable_pav, struct eckd_count *fmt_buffer,
			  int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct tidaw *last_tidaw = NULL;
	struct dasd_ccw_req *cqr;
	struct itcw *itcw;
	int itcw_size;
	int count;
	int rc;
	int i;

	/* with PAV, the request may be started on an alias device */
	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/*
	 * we're adding 'count' amount of tidaw to the itcw.
	 * calculate the corresponding itcw_size
	 */
	itcw_size = itcw_calc_size(0, count, 0);

	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;

	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
	if (IS_ERR(itcw)) {
		rc = -EINVAL;
		goto out_err;
	}

	cqr->cpaddr = itcw_get_tcw(itcw);
	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
			  sizeof(struct eckd_count),
			  count * sizeof(struct eckd_count), 0, rpt);
	if (rc)
		goto out_err;

	/* one tidaw per expected record, each targeting fmt_buffer[i] */
	for (i = 0; i < count; i++) {
		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
					    sizeof(struct eckd_count));
		if (IS_ERR(last_tidaw)) {
			rc = -EINVAL;
			goto out_err;
		}
	}

	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	itcw_finalize(itcw);

	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = startdev->default_retries;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);

	return cqr;

out_err:
	dasd_sfree_request(cqr, startdev);

	return ERR_PTR(rc);
}
2389
2390/*
2391 * Build the CCW request for the format check
2392 */
2393static struct dasd_ccw_req *
2394dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2395 int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2396{
2397 struct dasd_eckd_private *start_priv;
2398 struct dasd_eckd_private *base_priv;
2399 struct dasd_device *startdev = NULL;
2400 struct dasd_ccw_req *cqr;
2401 struct ccw1 *ccw;
2402 void *data;
2403 int cplength, datasize;
2404 int use_prefix;
2405 int count;
2406 int i;
2407
2408 if (enable_pav)
2409 startdev = dasd_alias_get_start_dev(base);
2410
2411 if (!startdev)
2412 startdev = base;
2413
2414 start_priv = startdev->private;
2415 base_priv = base->private;
2416
2417 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2418
2419 use_prefix = base_priv->features.feature[8] & 0x01;
2420
2421 if (use_prefix) {
2422 cplength = 1;
2423 datasize = sizeof(struct PFX_eckd_data);
2424 } else {
2425 cplength = 2;
2426 datasize = sizeof(struct DE_eckd_data) +
2427 sizeof(struct LO_eckd_data);
2428 }
2429 cplength += count;
2430
5e2b17e7 2431 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
8fd57520
JH
2432 if (IS_ERR(cqr))
2433 return cqr;
2434
2435 start_priv->count++;
2436 data = cqr->data;
2437 ccw = cqr->cpaddr;
2438
2439 if (use_prefix) {
2440 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2441 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2442 count, 0, 0);
2443 } else {
2444 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
45f186be 2445 DASD_ECKD_CCW_READ_COUNT, startdev, 0);
8fd57520
JH
2446
2447 data += sizeof(struct DE_eckd_data);
2448 ccw[-1].flags |= CCW_FLAG_CC;
2449
2450 locate_record(ccw++, data, fdata->start_unit, 0, count,
2451 DASD_ECKD_CCW_READ_COUNT, base, 0);
2452 }
2453
2454 for (i = 0; i < count; i++) {
2455 ccw[-1].flags |= CCW_FLAG_CC;
2456 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2457 ccw->flags = CCW_FLAG_SLI;
2458 ccw->count = 8;
2459 ccw->cda = (__u32)(addr_t) fmt_buffer;
2460 ccw++;
2461 fmt_buffer++;
2462 }
2463
2464 cqr->startdev = startdev;
2465 cqr->memdev = startdev;
2466 cqr->basedev = base;
2467 cqr->retries = DASD_RETRIES;
2468 cqr->expires = startdev->default_expires * HZ;
2469 cqr->buildclk = get_tod_clock();
2470 cqr->status = DASD_CQR_FILLED;
2471 /* Set flags to suppress output for expected errors */
2472 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2473
2474 return cqr;
2475}
2476
1da177e4 2477static struct dasd_ccw_req *
5e2b17e7
JH
2478dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2479 struct format_data_t *fdata, int enable_pav)
1da177e4 2480{
d42e1712
SH
2481 struct dasd_eckd_private *base_priv;
2482 struct dasd_eckd_private *start_priv;
1da177e4
LT
2483 struct dasd_ccw_req *fcp;
2484 struct eckd_count *ect;
d42e1712 2485 struct ch_t address;
1da177e4
LT
2486 struct ccw1 *ccw;
2487 void *data;
b44b0ab3 2488 int rpt;
1da177e4 2489 int cplength, datasize;
d42e1712 2490 int i, j;
f9a28f7b
JBJ
2491 int intensity = 0;
2492 int r0_perm;
d42e1712 2493 int nr_tracks;
18d6624e 2494 int use_prefix;
1da177e4 2495
a94fa154 2496 if (enable_pav)
29b8dd9d
SH
2497 startdev = dasd_alias_get_start_dev(base);
2498
d42e1712
SH
2499 if (!startdev)
2500 startdev = base;
1da177e4 2501
543691a4
SO
2502 start_priv = startdev->private;
2503 base_priv = base->private;
d42e1712
SH
2504
2505 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2506
2507 nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
1da177e4
LT
2508
2509 /*
2510 * fdata->intensity is a bit string that tells us what to do:
2511 * Bit 0: write record zero
2512 * Bit 1: write home address, currently not supported
2513 * Bit 2: invalidate tracks
2514 * Bit 3: use OS/390 compatible disk layout (cdl)
f9a28f7b 2515 * Bit 4: do not allow storage subsystem to modify record zero
1da177e4
LT
2516 * Only some bit combinations do make sense.
2517 */
f9a28f7b
JBJ
2518 if (fdata->intensity & 0x10) {
2519 r0_perm = 0;
2520 intensity = fdata->intensity & ~0x10;
2521 } else {
2522 r0_perm = 1;
2523 intensity = fdata->intensity;
2524 }
d42e1712 2525
18d6624e
SH
2526 use_prefix = base_priv->features.feature[8] & 0x01;
2527
f9a28f7b 2528 switch (intensity) {
1da177e4
LT
2529 case 0x00: /* Normal format */
2530 case 0x08: /* Normal format, use cdl. */
d42e1712 2531 cplength = 2 + (rpt*nr_tracks);
18d6624e
SH
2532 if (use_prefix)
2533 datasize = sizeof(struct PFX_eckd_data) +
2534 sizeof(struct LO_eckd_data) +
2535 rpt * nr_tracks * sizeof(struct eckd_count);
2536 else
2537 datasize = sizeof(struct DE_eckd_data) +
2538 sizeof(struct LO_eckd_data) +
2539 rpt * nr_tracks * sizeof(struct eckd_count);
1da177e4
LT
2540 break;
2541 case 0x01: /* Write record zero and format track. */
2542 case 0x09: /* Write record zero and format track, use cdl. */
d42e1712 2543 cplength = 2 + rpt * nr_tracks;
18d6624e
SH
2544 if (use_prefix)
2545 datasize = sizeof(struct PFX_eckd_data) +
2546 sizeof(struct LO_eckd_data) +
2547 sizeof(struct eckd_count) +
2548 rpt * nr_tracks * sizeof(struct eckd_count);
2549 else
2550 datasize = sizeof(struct DE_eckd_data) +
2551 sizeof(struct LO_eckd_data) +
2552 sizeof(struct eckd_count) +
2553 rpt * nr_tracks * sizeof(struct eckd_count);
1da177e4
LT
2554 break;
2555 case 0x04: /* Invalidate track. */
2556 case 0x0c: /* Invalidate track, use cdl. */
2557 cplength = 3;
18d6624e
SH
2558 if (use_prefix)
2559 datasize = sizeof(struct PFX_eckd_data) +
2560 sizeof(struct LO_eckd_data) +
2561 sizeof(struct eckd_count);
2562 else
2563 datasize = sizeof(struct DE_eckd_data) +
2564 sizeof(struct LO_eckd_data) +
2565 sizeof(struct eckd_count);
1da177e4
LT
2566 break;
2567 default:
d42e1712
SH
2568 dev_warn(&startdev->cdev->dev,
2569 "An I/O control call used incorrect flags 0x%x\n",
2570 fdata->intensity);
1da177e4
LT
2571 return ERR_PTR(-EINVAL);
2572 }
5e2b17e7
JH
2573
2574 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
1da177e4
LT
2575 if (IS_ERR(fcp))
2576 return fcp;
2577
d42e1712 2578 start_priv->count++;
1da177e4
LT
2579 data = fcp->data;
2580 ccw = fcp->cpaddr;
2581
f9a28f7b 2582 switch (intensity & ~0x08) {
1da177e4 2583 case 0x00: /* Normal format. */
18d6624e
SH
2584 if (use_prefix) {
2585 prefix(ccw++, (struct PFX_eckd_data *) data,
2586 fdata->start_unit, fdata->stop_unit,
2587 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2588 /* grant subsystem permission to format R0 */
2589 if (r0_perm)
2590 ((struct PFX_eckd_data *)data)
2591 ->define_extent.ga_extended |= 0x04;
2592 data += sizeof(struct PFX_eckd_data);
2593 } else {
2594 define_extent(ccw++, (struct DE_eckd_data *) data,
2595 fdata->start_unit, fdata->stop_unit,
45f186be 2596 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
18d6624e
SH
2597 /* grant subsystem permission to format R0 */
2598 if (r0_perm)
2599 ((struct DE_eckd_data *) data)
2600 ->ga_extended |= 0x04;
2601 data += sizeof(struct DE_eckd_data);
2602 }
1da177e4
LT
2603 ccw[-1].flags |= CCW_FLAG_CC;
2604 locate_record(ccw++, (struct LO_eckd_data *) data,
d42e1712
SH
2605 fdata->start_unit, 0, rpt*nr_tracks,
2606 DASD_ECKD_CCW_WRITE_CKD, base,
1da177e4
LT
2607 fdata->blksize);
2608 data += sizeof(struct LO_eckd_data);
2609 break;
2610 case 0x01: /* Write record zero + format track. */
18d6624e
SH
2611 if (use_prefix) {
2612 prefix(ccw++, (struct PFX_eckd_data *) data,
2613 fdata->start_unit, fdata->stop_unit,
2614 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2615 base, startdev);
2616 data += sizeof(struct PFX_eckd_data);
2617 } else {
2618 define_extent(ccw++, (struct DE_eckd_data *) data,
2619 fdata->start_unit, fdata->stop_unit,
45f186be 2620 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
18d6624e
SH
2621 data += sizeof(struct DE_eckd_data);
2622 }
1da177e4
LT
2623 ccw[-1].flags |= CCW_FLAG_CC;
2624 locate_record(ccw++, (struct LO_eckd_data *) data,
d42e1712
SH
2625 fdata->start_unit, 0, rpt * nr_tracks + 1,
2626 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2627 base->block->bp_block);
1da177e4
LT
2628 data += sizeof(struct LO_eckd_data);
2629 break;
2630 case 0x04: /* Invalidate track. */
18d6624e
SH
2631 if (use_prefix) {
2632 prefix(ccw++, (struct PFX_eckd_data *) data,
2633 fdata->start_unit, fdata->stop_unit,
2634 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2635 data += sizeof(struct PFX_eckd_data);
2636 } else {
2637 define_extent(ccw++, (struct DE_eckd_data *) data,
2638 fdata->start_unit, fdata->stop_unit,
45f186be 2639 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
18d6624e
SH
2640 data += sizeof(struct DE_eckd_data);
2641 }
1da177e4
LT
2642 ccw[-1].flags |= CCW_FLAG_CC;
2643 locate_record(ccw++, (struct LO_eckd_data *) data,
2644 fdata->start_unit, 0, 1,
d42e1712 2645 DASD_ECKD_CCW_WRITE_CKD, base, 8);
1da177e4
LT
2646 data += sizeof(struct LO_eckd_data);
2647 break;
2648 }
d42e1712
SH
2649
2650 for (j = 0; j < nr_tracks; j++) {
2651 /* calculate cylinder and head for the current track */
2652 set_ch_t(&address,
2653 (fdata->start_unit + j) /
2654 base_priv->rdc_data.trk_per_cyl,
2655 (fdata->start_unit + j) %
2656 base_priv->rdc_data.trk_per_cyl);
2657 if (intensity & 0x01) { /* write record zero */
1da177e4
LT
2658 ect = (struct eckd_count *) data;
2659 data += sizeof(struct eckd_count);
b44b0ab3
SW
2660 ect->cyl = address.cyl;
2661 ect->head = address.head;
d42e1712 2662 ect->record = 0;
1da177e4 2663 ect->kl = 0;
d42e1712 2664 ect->dl = 8;
1da177e4 2665 ccw[-1].flags |= CCW_FLAG_CC;
d42e1712 2666 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1da177e4
LT
2667 ccw->flags = CCW_FLAG_SLI;
2668 ccw->count = 8;
2669 ccw->cda = (__u32)(addr_t) ect;
2670 ccw++;
2671 }
d42e1712
SH
2672 if ((intensity & ~0x08) & 0x04) { /* erase track */
2673 ect = (struct eckd_count *) data;
2674 data += sizeof(struct eckd_count);
2675 ect->cyl = address.cyl;
2676 ect->head = address.head;
2677 ect->record = 1;
2678 ect->kl = 0;
2679 ect->dl = 0;
2680 ccw[-1].flags |= CCW_FLAG_CC;
2681 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2682 ccw->flags = CCW_FLAG_SLI;
2683 ccw->count = 8;
2684 ccw->cda = (__u32)(addr_t) ect;
2685 } else { /* write remaining records */
2686 for (i = 0; i < rpt; i++) {
2687 ect = (struct eckd_count *) data;
2688 data += sizeof(struct eckd_count);
2689 ect->cyl = address.cyl;
2690 ect->head = address.head;
2691 ect->record = i + 1;
2692 ect->kl = 0;
2693 ect->dl = fdata->blksize;
2694 /*
2695 * Check for special tracks 0-1
2696 * when formatting CDL
2697 */
2698 if ((intensity & 0x08) &&
46d1c03c 2699 address.cyl == 0 && address.head == 0) {
d42e1712
SH
2700 if (i < 3) {
2701 ect->kl = 4;
2702 ect->dl = sizes_trk0[i] - 4;
2703 }
2704 }
2705 if ((intensity & 0x08) &&
46d1c03c 2706 address.cyl == 0 && address.head == 1) {
d42e1712
SH
2707 ect->kl = 44;
2708 ect->dl = LABEL_SIZE - 44;
2709 }
2710 ccw[-1].flags |= CCW_FLAG_CC;
2711 if (i != 0 || j == 0)
2712 ccw->cmd_code =
2713 DASD_ECKD_CCW_WRITE_CKD;
2714 else
2715 ccw->cmd_code =
2716 DASD_ECKD_CCW_WRITE_CKD_MT;
2717 ccw->flags = CCW_FLAG_SLI;
2718 ccw->count = 8;
ba21d0ea
SH
2719 ccw->cda = (__u32)(addr_t) ect;
2720 ccw++;
d42e1712
SH
2721 }
2722 }
1da177e4 2723 }
d42e1712
SH
2724
2725 fcp->startdev = startdev;
2726 fcp->memdev = startdev;
29b8dd9d 2727 fcp->basedev = base;
eb6e199b 2728 fcp->retries = 256;
d42e1712 2729 fcp->expires = startdev->default_expires * HZ;
1aae0560 2730 fcp->buildclk = get_tod_clock();
1da177e4 2731 fcp->status = DASD_CQR_FILLED;
d42e1712 2732
1da177e4
LT
2733 return fcp;
2734}
2735
570d237c
JH
2736/*
2737 * Wrapper function to build a CCW request depending on input data
2738 */
2739static struct dasd_ccw_req *
2740dasd_eckd_format_build_ccw_req(struct dasd_device *base,
8fd57520
JH
2741 struct format_data_t *fdata, int enable_pav,
2742 int tpm, struct eckd_count *fmt_buffer, int rpt)
570d237c 2743{
8fd57520
JH
2744 struct dasd_ccw_req *ccw_req;
2745
2746 if (!fmt_buffer) {
5e2b17e7 2747 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
8fd57520
JH
2748 } else {
2749 if (tpm)
2750 ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2751 enable_pav,
2752 fmt_buffer, rpt);
2753 else
2754 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2755 fmt_buffer, rpt);
2756 }
2757
2758 return ccw_req;
570d237c
JH
2759}
2760
2761/*
2762 * Sanity checks on format_data
2763 */
2764static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2765 struct format_data_t *fdata)
d42e1712 2766{
543691a4 2767 struct dasd_eckd_private *private = base->private;
d42e1712 2768
d42e1712
SH
2769 if (fdata->start_unit >=
2770 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2771 dev_warn(&base->cdev->dev,
2772 "Start track number %u used in formatting is too big\n",
2773 fdata->start_unit);
2774 return -EINVAL;
2775 }
2776 if (fdata->stop_unit >=
2777 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2778 dev_warn(&base->cdev->dev,
2779 "Stop track number %u used in formatting is too big\n",
2780 fdata->stop_unit);
2781 return -EINVAL;
2782 }
2783 if (fdata->start_unit > fdata->stop_unit) {
2784 dev_warn(&base->cdev->dev,
2785 "Start track %u used in formatting exceeds end track\n",
2786 fdata->start_unit);
2787 return -EINVAL;
2788 }
2789 if (dasd_check_blocksize(fdata->blksize) != 0) {
2790 dev_warn(&base->cdev->dev,
2791 "The DASD cannot be formatted with block size %u\n",
2792 fdata->blksize);
2793 return -EINVAL;
2794 }
570d237c
JH
2795 return 0;
2796}
2797
2798/*
2799 * This function will process format_data originally coming from an IOCTL
2800 */
2801static int dasd_eckd_format_process_data(struct dasd_device *base,
2802 struct format_data_t *fdata,
8fd57520
JH
2803 int enable_pav, int tpm,
2804 struct eckd_count *fmt_buffer, int rpt,
2805 struct irb *irb)
570d237c 2806{
543691a4 2807 struct dasd_eckd_private *private = base->private;
570d237c 2808 struct dasd_ccw_req *cqr, *n;
570d237c
JH
2809 struct list_head format_queue;
2810 struct dasd_device *device;
8fd57520 2811 char *sense = NULL;
570d237c
JH
2812 int old_start, old_stop, format_step;
2813 int step, retry;
2814 int rc;
2815
570d237c
JH
2816 rc = dasd_eckd_format_sanity_checks(base, fdata);
2817 if (rc)
2818 return rc;
d42e1712
SH
2819
2820 INIT_LIST_HEAD(&format_queue);
d42e1712 2821
46d1c03c 2822 old_start = fdata->start_unit;
29b8dd9d 2823 old_stop = fdata->stop_unit;
d42e1712 2824
8fd57520
JH
2825 if (!tpm && fmt_buffer != NULL) {
2826 /* Command Mode / Format Check */
2827 format_step = 1;
2828 } else if (tpm && fmt_buffer != NULL) {
2829 /* Transport Mode / Format Check */
2830 format_step = DASD_CQR_MAX_CCW / rpt;
2831 } else {
2832 /* Normal Formatting */
2833 format_step = DASD_CQR_MAX_CCW /
2834 recs_per_track(&private->rdc_data, 0, fdata->blksize);
2835 }
2836
46d1c03c
JH
2837 do {
2838 retry = 0;
2839 while (fdata->start_unit <= old_stop) {
2840 step = fdata->stop_unit - fdata->start_unit + 1;
2841 if (step > format_step) {
2842 fdata->stop_unit =
2843 fdata->start_unit + format_step - 1;
2844 }
d42e1712 2845
570d237c 2846 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
8fd57520
JH
2847 enable_pav, tpm,
2848 fmt_buffer, rpt);
46d1c03c
JH
2849 if (IS_ERR(cqr)) {
2850 rc = PTR_ERR(cqr);
2851 if (rc == -ENOMEM) {
2852 if (list_empty(&format_queue))
2853 goto out;
2854 /*
2855 * not enough memory available, start
2856 * requests retry after first requests
2857 * were finished
2858 */
2859 retry = 1;
2860 break;
2861 }
2862 goto out_err;
2863 }
2864 list_add_tail(&cqr->blocklist, &format_queue);
d42e1712 2865
8fd57520
JH
2866 if (fmt_buffer) {
2867 step = fdata->stop_unit - fdata->start_unit + 1;
2868 fmt_buffer += rpt * step;
2869 }
46d1c03c
JH
2870 fdata->start_unit = fdata->stop_unit + 1;
2871 fdata->stop_unit = old_stop;
d42e1712 2872 }
d42e1712 2873
46d1c03c
JH
2874 rc = dasd_sleep_on_queue(&format_queue);
2875
2876out_err:
2877 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
2878 device = cqr->startdev;
543691a4 2879 private = device->private;
8fd57520
JH
2880
2881 if (cqr->status == DASD_CQR_FAILED) {
2882 /*
2883 * Only get sense data if called by format
2884 * check
2885 */
2886 if (fmt_buffer && irb) {
2887 sense = dasd_get_sense(&cqr->irb);
2888 memcpy(irb, &cqr->irb, sizeof(*irb));
2889 }
46d1c03c 2890 rc = -EIO;
8fd57520 2891 }
46d1c03c 2892 list_del_init(&cqr->blocklist);
5e2b17e7 2893 dasd_ffree_request(cqr, device);
46d1c03c
JH
2894 private->count--;
2895 }
d42e1712 2896
8fd57520 2897 if (rc && rc != -EIO)
46d1c03c 2898 goto out;
8fd57520
JH
2899 if (rc == -EIO) {
2900 /*
2901 * In case fewer than the expected records are on the
2902 * track, we will most likely get a 'No Record Found'
2903 * error (in command mode) or a 'File Protected' error
2904 * (in transport mode). Those particular cases shouldn't
2905 * pass the -EIO to the IOCTL, therefore reset the rc
2906 * and continue.
2907 */
2908 if (sense &&
2909 (sense[1] & SNS1_NO_REC_FOUND ||
2910 sense[1] & SNS1_FILE_PROTECTED))
2911 retry = 1;
2912 else
2913 goto out;
2914 }
d42e1712 2915
46d1c03c 2916 } while (retry);
29b8dd9d 2917
46d1c03c
JH
2918out:
2919 fdata->start_unit = old_start;
2920 fdata->stop_unit = old_stop;
d42e1712
SH
2921
2922 return rc;
2923}
2924
/*
 * Format the device per 'fdata': plain formatting, i.e. no format-check
 * buffer and no sense data collection.
 */
static int dasd_eckd_format_device(struct dasd_device *base,
				   struct format_data_t *fdata, int enable_pav)
{
	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
					     0, NULL);
}
2931
5e2b17e7
JH
2932/*
2933 * Callback function to free ESE format requests.
2934 */
2935static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
2936{
2937 struct dasd_device *device = cqr->startdev;
2938 struct dasd_eckd_private *private = device->private;
2939
2940 private->count--;
2941 dasd_ffree_request(cqr, device);
2942}
2943
/*
 * Build a format request covering the track range touched by the
 * request 'cqr' on an ESE (thin-provisioned) volume, so the tracks can
 * be formatted on demand. Reuses 'startdev' with PAV disabled.
 */
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
{
	struct dasd_eckd_private *private;
	struct format_data_t fdata;
	unsigned int recs_per_trk;
	struct dasd_ccw_req *fcqr;
	struct dasd_device *base;
	struct dasd_block *block;
	unsigned int blksize;
	struct request *req;
	sector_t first_trk;
	sector_t last_trk;
	int rc;

	req = cqr->callback_data;
	base = cqr->block->base;
	private = base->private;
	block = base->block;
	blksize = block->bp_block;
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);

	/* translate the request's sector range into a track range */
	first_trk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);

	fdata.start_unit = first_trk;
	fdata.stop_unit = last_trk;
	fdata.blksize = blksize;
	fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;

	rc = dasd_eckd_format_sanity_checks(base, &fdata);
	if (rc)
		return ERR_PTR(-EINVAL);

	/*
	 * We're building the request with PAV disabled as we're reusing
	 * the former startdev.
	 */
	fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
	if (IS_ERR(fcqr))
		return fcqr;

	fcqr->callback = dasd_eckd_ese_format_cb;

	return fcqr;
}
2993
2994/*
2995 * When data is read from an unformatted area of an ESE volume, this function
2996 * returns zeroed data and thereby mimics a read of zero data.
2997 */
2998static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
2999{
3000 unsigned int blksize, off;
3001 struct dasd_device *base;
3002 struct req_iterator iter;
3003 struct request *req;
3004 struct bio_vec bv;
3005 char *dst;
3006
3007 req = (struct request *) cqr->callback_data;
3008 base = cqr->block->base;
3009 blksize = base->block->bp_block;
3010
3011 rq_for_each_segment(bv, req, iter) {
3012 dst = page_address(bv.bv_page) + bv.bv_offset;
3013 for (off = 0; off < bv.bv_len; off += blksize) {
3014 if (dst && rq_data_dir(req) == READ) {
3015 dst += off;
3016 memset(dst, 0, blksize);
3017 }
3018 }
3019 }
3020}
3021
8fd57520
JH
3022/*
3023 * Helper function to count consecutive records of a single track.
3024 */
3025static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3026 int max)
3027{
3028 int head;
3029 int i;
3030
3031 head = fmt_buffer[start].head;
3032
3033 /*
3034 * There are 3 conditions where we stop counting:
3035 * - if data reoccurs (same head and record may reoccur), which may
3036 * happen due to the way DASD_ECKD_CCW_READ_COUNT works
3037 * - when the head changes, because we're iterating over several tracks
3038 * then (DASD_ECKD_CCW_READ_COUNT_MT)
3039 * - when we've reached the end of sensible data in the buffer (the
3040 * record will be 0 then)
3041 */
3042 for (i = start; i < max; i++) {
3043 if (i > start) {
3044 if ((fmt_buffer[i].head == head &&
3045 fmt_buffer[i].record == 1) ||
3046 fmt_buffer[i].head != head ||
3047 fmt_buffer[i].record == 0)
3048 break;
3049 }
3050 }
3051
3052 return i - start;
3053}
3054
3055/*
3056 * Evaluate a given range of tracks. Data like number of records, blocksize,
3057 * record ids, and key length are compared with expected data.
3058 *
3059 * If a mismatch occurs, the corresponding error bit is set, as well as
3060 * additional information, depending on the error.
3061 */
/*
 * Walk the READ COUNT results track by track and compare record count,
 * block size, key length and record IDs against the expected format.
 * On the first mismatch the matching DASD_FMT_ERR_* code and the
 * position of the offending record are stored in *cdata.
 */
3062 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3063 struct format_check_t *cdata,
3064 int rpt_max, int rpt_exp,
3065 int trk_per_cyl, int tpm)
3066 {
3067 struct ch_t geo;
3068 int max_entries;
3069 int count = 0;
3070 int trkcount;
3071 int blksize;
3072 int pos = 0;
3073 int i, j;
3074 int kl;
3075
3076 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3077 max_entries = trkcount * rpt_max;
3078
3079 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3080 /* Calculate the correct next starting position in the buffer */
3081 if (tpm) {
/*
 * Transport mode: the buffer may contain zeroed filler entries
 * between tracks; skip them.  NOTE(review): the post-increment
 * allows pos to step one entry past max_entries before the
 * break triggers - confirm the buffer has that slack.
 */
3082 while (fmt_buffer[pos].record == 0 &&
3083 fmt_buffer[pos].dl == 0) {
3084 if (pos++ > max_entries)
3085 break;
3086 }
3087 } else {
/* Command mode: each track occupies a fixed rpt_max slot. */
3088 if (i != cdata->expect.start_unit)
3089 pos += rpt_max - count;
3090 }
3091
3092 /* Calculate the expected geo values for the current track */
3093 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3094
3095 /* Count and check number of records */
3096 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3097
3098 if (count < rpt_exp) {
3099 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3100 break;
3101 }
3102 if (count > rpt_exp) {
3103 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3104 break;
3105 }
3106
3107 for (j = 0; j < count; j++, pos++) {
3108 blksize = cdata->expect.blksize;
3109 kl = 0;
3110
3111 /*
3112 * Set special values when checking CDL formatted
3113 * devices.
3114 */
/* CDL: records 0-2 of track 0 carry a 4-byte key. */
3115 if ((cdata->expect.intensity & 0x08) &&
3116 geo.cyl == 0 && geo.head == 0) {
3117 if (j < 3) {
3118 blksize = sizes_trk0[j] - 4;
3119 kl = 4;
3120 }
3121 }
/* CDL: track 1 holds the volume label with a 44-byte key. */
3122 if ((cdata->expect.intensity & 0x08) &&
3123 geo.cyl == 0 && geo.head == 1) {
3124 blksize = LABEL_SIZE - 44;
3125 kl = 44;
3126 }
3127
3128 /* Check blocksize */
3129 if (fmt_buffer[pos].dl != blksize) {
3130 cdata->result = DASD_FMT_ERR_BLKSIZE;
3131 goto out;
3132 }
3133 /* Check if key length is 0 */
3134 if (fmt_buffer[pos].kl != kl) {
3135 cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3136 goto out;
3137 }
3138 /* Check if record_id is correct */
3139 if (fmt_buffer[pos].cyl != geo.cyl ||
3140 fmt_buffer[pos].head != geo.head ||
3141 fmt_buffer[pos].record != (j + 1)) {
3142 cdata->result = DASD_FMT_ERR_RECORD_ID;
3143 goto out;
3144 }
3145 }
3146 }
3147
3148out:
3149 /*
3150 * In case of no errors, we need to decrease by one
3151 * to get the correct positions.
3152 */
3153 if (!cdata->result) {
3154 i--;
3155 pos--;
3156 }
3157
/* Report the position where checking stopped (error or last record). */
3158 cdata->unit = i;
3159 cdata->num_records = count;
3160 cdata->rec = fmt_buffer[pos].record;
3161 cdata->blksize = fmt_buffer[pos].dl;
3162 cdata->key_length = fmt_buffer[pos].kl;
3163 }
3164
3165/*
3166 * Check the format of a range of tracks of a DASD.
3167 */
3168static int dasd_eckd_check_device_format(struct dasd_device *base,
3169 struct format_check_t *cdata,
3170 int enable_pav)
3171{
3172 struct dasd_eckd_private *private = base->private;
3173 struct eckd_count *fmt_buffer;
3174 struct irb irb;
3175 int rpt_max, rpt_exp;
3176 int fmt_buffer_size;
3177 int trk_per_cyl;
3178 int trkcount;
3179 int tpm = 0;
3180 int rc;
3181
3182 trk_per_cyl = private->rdc_data.trk_per_cyl;
3183
3184 /* Get maximum and expected amount of records per track */
3185 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3186 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3187
3188 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3189 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3190
3191 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3192 if (!fmt_buffer)
3193 return -ENOMEM;
3194
3195 /*
3196 * A certain FICON feature subset is needed to operate in transport
3197 * mode. Additionally, the support for transport mode is implicitly
3198 * checked by comparing the buffer size with fcx_max_data. As long as
3199 * the buffer size is smaller we can operate in transport mode and
3200 * process multiple tracks. If not, only one track at once is being
3201 * processed using command mode.
3202 */
3203 if ((private->features.feature[40] & 0x04) &&
3204 fmt_buffer_size <= private->fcx_max_data)
3205 tpm = 1;
3206
3207 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3208 tpm, fmt_buffer, rpt_max, &irb);
3209 if (rc && rc != -EIO)
3210 goto out;
3211 if (rc == -EIO) {
3212 /*
3213 * If our first attempt with transport mode enabled comes back
3214 * with an incorrect length error, we're going to retry the
3215 * check with command mode.
3216 */
3217 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3218 tpm = 0;
3219 rc = dasd_eckd_format_process_data(base, &cdata->expect,
3220 enable_pav, tpm,
3221 fmt_buffer, rpt_max,
3222 &irb);
3223 if (rc)
3224 goto out;
3225 } else {
3226 goto out;
3227 }
3228 }
3229
3230 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3231 trk_per_cyl, tpm);
3232
3233out:
3234 kfree(fmt_buffer);
3235
3236 return rc;
570d237c
JH
3237}
3238
8e09f215 3239static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1da177e4 3240{
a2ace466
HR
3241 if (cqr->retries < 0) {
3242 cqr->status = DASD_CQR_FAILED;
3243 return;
3244 }
8e09f215
SW
3245 cqr->status = DASD_CQR_FILLED;
3246 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3247 dasd_eckd_reset_ccw_to_base_io(cqr);
3248 cqr->startdev = cqr->block->base;
c9346151 3249 cqr->lpm = dasd_path_get_opm(cqr->block->base);
1da177e4 3250 }
8e09f215 3251};
1da177e4
LT
3252
3253static dasd_erp_fn_t
3254dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3255{
8e09f215 3256 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1da177e4
LT
3257 struct ccw_device *cdev = device->cdev;
3258
3259 switch (cdev->id.cu_type) {
3260 case 0x3990:
3261 case 0x2105:
3262 case 0x2107:
3263 case 0x1750:
3264 return dasd_3990_erp_action;
3265 case 0x9343:
3266 case 0x3880:
3267 default:
3268 return dasd_default_erp_action;
3269 }
3270}
3271
3272static dasd_erp_fn_t
3273dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3274{
3275 return dasd_default_erp_postaction;
3276}
3277
5a27e60d
SW
/*
 * Inspect an interrupt for conditions that concern the device as a
 * whole rather than the current request: state change pending, summary
 * unit check, service information messages (SIM) and loss of a device
 * reservation.
 */
3278 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3279 struct dasd_ccw_req *cqr,
3280 struct irb *irb)
8e09f215
SW
3281 {
3282 char mask;
3283 char *sense = NULL;
3284 struct dasd_eckd_private *private = device->private;
8e09f215
SW
3285
3286 /* first of all check for state change pending interrupt */
3287 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3288 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
c8d1c0ff
SH
3289 /*
3290 * for alias only, not in offline processing
3291 * and only if not suspended
3292 */
3293 if (!device->block && private->lcu &&
3294 device->state == DASD_STATE_ONLINE &&
c8d1c0ff
SH
3295 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3296 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
501183f2
SH
3297 /* schedule worker to reload device */
3298 dasd_reload_device(device);
3299 }
8e09f215
SW
3300 dasd_generic_handle_state_change(device);
3301 return;
3302 }
3303
3304 sense = dasd_get_sense(irb);
5a27e60d
SW
3305 if (!sense)
3306 return;
3307
3308 /* summary unit check */
/* sense byte 7 == 0x0D together with sense bit 0 flags a SUC */
3309 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3310 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
59a9ed5f
SH
/* only one SUC worker at a time; DASD_FLAG_SUC serializes it */
3311 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3312 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3313 "eckd suc: device already notified");
3314 return;
3315 }
/* re-fetch from the same irb; matches the value read above */
3316 sense = dasd_get_sense(irb);
3317 if (!sense) {
3318 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3319 "eckd suc: no reason code available");
3320 clear_bit(DASD_FLAG_SUC, &device->flags);
3321 return;
3322
3323 }
3324 private->suc_reason = sense[8];
3325 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3326 "eckd handle summary unit check: reason",
3327 private->suc_reason);
/* hold a reference for the worker; drop it if scheduling failed */
3328 dasd_get_device(device);
3329 if (!schedule_work(&device->suc_work))
3330 dasd_put_device(device);
3331
8e09f215
SW
3332 return;
3333 }
3334
3335 /* service information message SIM */
3336 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
f3eb5384
SW
3337 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3338 dasd_3990_erp_handle_sim(device, sense);
f60c768c
SH
3339 return;
3340 }
3341
5a27e60d
SW
3342 /* loss of device reservation is handled via base devices only
3343 * as alias devices may be used with several bases
3344 */
c7a29e56
SW
3345 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3346 (sense[7] == 0x3F) &&
5a27e60d
SW
3347 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3348 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3349 if (device->features & DASD_FEATURE_FAILONSLCK)
3350 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3351 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3352 dev_err(&device->cdev->dev,
3353 "The device reservation was lost\n");
3354 }
3355}
f3eb5384 3356
91dc4a19
JH
3357static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3358 unsigned int first_trk,
3359 unsigned int last_trk)
3360{
3361 struct dasd_eckd_private *private = device->private;
3362 unsigned int trks_per_vol;
3363 int rc = 0;
3364
3365 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3366
3367 if (first_trk >= trks_per_vol) {
3368 dev_warn(&device->cdev->dev,
3369 "Start track number %u used in the space release command is too big\n",
3370 first_trk);
3371 rc = -EINVAL;
3372 } else if (last_trk >= trks_per_vol) {
3373 dev_warn(&device->cdev->dev,
3374 "Stop track number %u used in the space release command is too big\n",
3375 last_trk);
3376 rc = -EINVAL;
3377 } else if (first_trk > last_trk) {
3378 dev_warn(&device->cdev->dev,
3379 "Start track %u used in the space release command exceeds the end track\n",
3380 first_trk);
3381 rc = -EINVAL;
3382 }
3383 return rc;
3384}
3385
3386/*
3387 * Helper function to count the amount of involved extents within a given range
3388 * with extent alignment in mind.
3389 */
/*
 * Helper function to count the number of extents a given track range
 * touches, with extent alignment in mind.
 *
 * Tracks are grouped into extents of trks_per_ext tracks each, extent n
 * covering tracks [n * trks_per_ext, (n + 1) * trks_per_ext - 1].  The
 * number of extents touched by the inclusive range [from, to] is simply
 * the difference of the two extent indices plus one.
 *
 * The previous open-coded version over-counted by one whenever the range
 * was a partial extent that neither started at its extent's first track
 * nor reached its extent's last track (its "last partial extent" test
 * compared a track count against a track number), which made the caller
 * emit an invalid beg > end extent range.
 */
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
	/* defensive: a non-positive extent size would divide by zero */
	if (trks_per_ext <= 0)
		return 0;

	return to / trks_per_ext - from / trks_per_ext + 1;
}
3419
3420/*
3421 * Release allocated space for a given range or an entire volume.
3422 */
3423 static struct dasd_ccw_req *
3424 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3425 struct request *req, unsigned int first_trk,
3426 unsigned int last_trk, int by_extent)
3427 {
3428 struct dasd_eckd_private *private = device->private;
3429 struct dasd_dso_ras_ext_range *ras_range;
3430 struct dasd_rssd_features *features;
3431 struct dasd_dso_ras_data *ras_data;
3432 u16 heads, beg_head, end_head;
3433 int cur_to_trk, cur_from_trk;
3434 struct dasd_ccw_req *cqr;
3435 u32 beg_cyl, end_cyl;
3436 struct ccw1 *ccw;
3437 int trks_per_ext;
3438 size_t ras_size;
3439 size_t size;
3440 int nr_exts;
3441 void *rq;
3442 int i;
3443
3444 if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3445 return ERR_PTR(-EINVAL);
3446
3447 rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3448
3449 features = &private->features;
3450
3451 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3452 nr_exts = 0;
3453 if (by_extent)
3454 nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
/* payload = fixed RAS header plus one range descriptor per extent */
3455 ras_size = sizeof(*ras_data);
3456 size = ras_size + (nr_exts * sizeof(*ras_range));
3457
3458 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3459 if (IS_ERR(cqr)) {
3460 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3461 "Could not allocate RAS request");
3462 return cqr;
3463 }
3464
3465 ras_data = cqr->data;
3466 memset(ras_data, 0, size);
3467
3468 ras_data->order = DSO_ORDER_RAS;
3469 ras_data->flags.vol_type = 0; /* CKD volume */
3470 /* Release specified extents or entire volume */
3471 ras_data->op_flags.by_extent = by_extent;
3472 /*
3473 * This bit guarantees initialisation of tracks within an extent that is
3474 * not fully specified, but is only supported with a certain feature
3475 * subset.
3476 */
3477 ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
3478 ras_data->lss = private->ned->ID;
3479 ras_data->dev_addr = private->ned->unit_addr;
3480 ras_data->nr_exts = nr_exts;
3481
3482 if (by_extent) {
3483 heads = private->rdc_data.trk_per_cyl;
/* first chunk ends at the extent boundary (or at last_trk) */
3484 cur_from_trk = first_trk;
3485 cur_to_trk = first_trk + trks_per_ext -
3486 (first_trk % trks_per_ext) - 1;
3487 if (cur_to_trk > last_trk)
3488 cur_to_trk = last_trk;
3489 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3490
/*
 * Emit exactly nr_exts cylinder/head ranges; the chunking here
 * must agree with count_exts() above.
 */
3491 for (i = 0; i < nr_exts; i++) {
3492 beg_cyl = cur_from_trk / heads;
3493 beg_head = cur_from_trk % heads;
3494 end_cyl = cur_to_trk / heads;
3495 end_head = cur_to_trk % heads;
3496
3497 set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3498 set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3499
3500 cur_from_trk = cur_to_trk + 1;
3501 cur_to_trk = cur_from_trk + trks_per_ext - 1;
3502 if (cur_to_trk > last_trk)
3503 cur_to_trk = last_trk;
3504 ras_range++;
3505 }
3506 }
3507
/* single DSO CCW transporting the whole RAS payload */
3508 ccw = cqr->cpaddr;
3509 ccw->cda = (__u32)(addr_t)cqr->data;
3510 ccw->cmd_code = DASD_ECKD_CCW_DSO;
3511 ccw->count = size;
3512
3513 cqr->startdev = device;
3514 cqr->memdev = device;
3515 cqr->block = block;
3516 cqr->retries = 256;
3517 cqr->expires = device->default_expires * HZ;
3518 cqr->buildclk = get_tod_clock();
3519 cqr->status = DASD_CQR_FILLED;
3520
3521 return cqr;
3522}
3523
3524static int dasd_eckd_release_space_full(struct dasd_device *device)
3525{
3526 struct dasd_ccw_req *cqr;
3527 int rc;
3528
3529 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3530 if (IS_ERR(cqr))
3531 return PTR_ERR(cqr);
3532
3533 rc = dasd_sleep_on_interruptible(cqr);
3534
3535 dasd_sfree_request(cqr, cqr->memdev);
3536
3537 return rc;
3538}
3539
/*
 * Release space for a track range in extent-aligned chunks.  Each RAS
 * request is limited to DASD_ECKD_RAS_EXTS_MAX extents (or the whole
 * device if smaller); requests are queued and waited for interruptibly.
 * On -ENOMEM with requests already queued, the queued batch is run and
 * the remainder retried afterwards.
 */
3540 static int dasd_eckd_release_space_trks(struct dasd_device *device,
3541 unsigned int from, unsigned int to)
3542 {
3543 struct dasd_eckd_private *private = device->private;
3544 struct dasd_block *block = device->block;
3545 struct dasd_ccw_req *cqr, *n;
3546 struct list_head ras_queue;
3547 unsigned int device_exts;
3548 int trks_per_ext;
3549 int stop, step;
3550 int cur_pos;
3551 int rc = 0;
3552 int retry;
3553
3554 INIT_LIST_HEAD(&ras_queue);
3555
3556 device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3557 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3558
3559 /* Make sure device limits are not exceeded */
3560 step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3561 cur_pos = from;
3562
3563 do {
3564 retry = 0;
3565 while (cur_pos < to) {
/* stop at the last extent boundary within this step, capped at 'to' */
3566 stop = cur_pos + step -
3567 ((cur_pos + step) % trks_per_ext) - 1;
3568 if (stop > to)
3569 stop = to;
3570
3571 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3572 if (IS_ERR(cqr)) {
3573 rc = PTR_ERR(cqr);
3574 if (rc == -ENOMEM) {
/* nothing queued yet: give up; otherwise run what we have and retry */
3575 if (list_empty(&ras_queue))
3576 goto out;
3577 retry = 1;
3578 break;
3579 }
3580 goto err_out;
3581 }
3582
3583 spin_lock_irq(&block->queue_lock);
3584 list_add_tail(&cqr->blocklist, &ras_queue);
3585 spin_unlock_irq(&block->queue_lock);
3586 cur_pos = stop + 1;
3587 }
3588
3589 rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3590
3591err_out:
/* free every queued request, successful or not */
3592 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3593 device = cqr->startdev;
3594 private = device->private;
3595
3596 spin_lock_irq(&block->queue_lock);
3597 list_del_init(&cqr->blocklist);
3598 spin_unlock_irq(&block->queue_lock);
3599 dasd_sfree_request(cqr, device);
/*
 * NOTE(review): private->count is decremented here but no matching
 * increment is visible in the RAS build path - verify against the
 * device's request accounting.
 */
3600 private->count--;
3601 }
3602 } while (retry);
3603
3604out:
3605 return rc;
3606}
3607
3608static int dasd_eckd_release_space(struct dasd_device *device,
3609 struct format_data_t *rdata)
3610{
3611 if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3612 return dasd_eckd_release_space_full(device);
3613 else if (rdata->intensity == 0)
3614 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3615 rdata->stop_unit);
3616 else
3617 return -EINVAL;
3618}
3619
f3eb5384
SW
/*
 * Build a command-mode channel program for a block I/O request using one
 * CCW per block: a Define Extent or Prefix CCW, one or more Locate Record
 * CCWs, then a Read/Write MT CCW per block.  Extra Locate Records are
 * produced for the CDL-special records on the first two tracks.
 */
3620 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3621 struct dasd_device *startdev,
3622 struct dasd_block *block,
f3eb5384
SW
3623 struct request *req,
3624 sector_t first_rec,
3625 sector_t last_rec,
3626 sector_t first_trk,
3627 sector_t last_trk,
3628 unsigned int first_offs,
3629 unsigned int last_offs,
3630 unsigned int blk_per_trk,
3631 unsigned int blksize)
1da177e4
LT
3632 {
3633 struct dasd_eckd_private *private;
3634 unsigned long *idaws;
3635 struct LO_eckd_data *LO_data;
3636 struct dasd_ccw_req *cqr;
3637 struct ccw1 *ccw;
3638 struct req_iterator iter;
3639 struct bio_vec bv;
3640 char *dst;
3641 unsigned int off;
3642 int count, cidaw, cplength, datasize;
3643 sector_t recid;
3644 unsigned char cmd, rcmd;
8e09f215
SW
3645 int use_prefix;
3646 struct dasd_device *basedev;
3647
3648 basedev = block->base;
3649 private = basedev->private;
1da177e4
LT
3650 if (rq_data_dir(req) == READ)
3651 cmd = DASD_ECKD_CCW_READ_MT;
3652 else if (rq_data_dir(req) == WRITE)
3653 cmd = DASD_ECKD_CCW_WRITE_MT;
3654 else
3655 return ERR_PTR(-EINVAL);
3656
1da177e4
LT
3657 /* Check struct bio and count the number of blocks for the request. */
3658 count = 0;
3659 cidaw = 0;
3660 rq_for_each_segment(bv, req, iter) {
3661 if (bv.bv_len & (blksize - 1))
6c92e699
JA
3662 /* Eckd can only do full blocks. */
3663 return ERR_PTR(-EINVAL);
3664 count += bv.bv_len >> (block->s2b_shift + 9);
7988613b
KO
3665 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
3666 cidaw += bv.bv_len >> (block->s2b_shift + 9);
1da177e4
LT
3667 }
3668 /* Paranoia. */
3669 if (count != last_rec - first_rec + 1)
3670 return ERR_PTR(-EINVAL);
8e09f215
SW
3671
3672 /* use the prefix command if available */
3673 use_prefix = private->features.feature[8] & 0x01;
3674 if (use_prefix) {
3675 /* 1x prefix + number of blocks */
3676 cplength = 2 + count;
3677 /* 1x prefix + cidaws*sizeof(long) */
3678 datasize = sizeof(struct PFX_eckd_data) +
3679 sizeof(struct LO_eckd_data) +
3680 cidaw * sizeof(unsigned long);
3681 } else {
3682 /* 1x define extent + 1x locate record + number of blocks */
3683 cplength = 2 + count;
3684 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3685 datasize = sizeof(struct DE_eckd_data) +
3686 sizeof(struct LO_eckd_data) +
3687 cidaw * sizeof(unsigned long);
3688 }
1da177e4
LT
3689 /* Find out the number of additional locate record ccws for cdl. */
3690 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3691 if (last_rec >= 2*blk_per_trk)
3692 count = 2*blk_per_trk - first_rec;
3693 cplength += count;
3694 datasize += count*sizeof(struct LO_eckd_data);
3695 }
3696 /* Allocate the ccw request. */
3697 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3698 startdev, blk_mq_rq_to_pdu(req));
1da177e4
LT
3699 if (IS_ERR(cqr))
3700 return cqr;
3701 ccw = cqr->cpaddr;
8e09f215
SW
3702 /* First ccw is define extent or prefix. */
3703 if (use_prefix) {
3704 if (prefix(ccw++, cqr->data, first_trk,
3705 last_trk, cmd, basedev, startdev) == -EAGAIN) {
3706 /* Clock not in sync and XRC is enabled.
3707 * Try again later.
3708 */
3709 dasd_sfree_request(cqr, startdev);
3710 return ERR_PTR(-EAGAIN);
3711 }
3712 idaws = (unsigned long *) (cqr->data +
3713 sizeof(struct PFX_eckd_data));
3714 } else {
3715 if (define_extent(ccw++, cqr->data, first_trk,
3716 last_trk, cmd, basedev, 0) == -EAGAIN) {
8e09f215
SW
3717 /* Clock not in sync and XRC is enabled.
3718 * Try again later.
3719 */
3720 dasd_sfree_request(cqr, startdev);
3721 return ERR_PTR(-EAGAIN);
3722 }
3723 idaws = (unsigned long *) (cqr->data +
3724 sizeof(struct DE_eckd_data));
3725 }
3726 /* Build locate_record+read/write/ccws. */
1da177e4
LT
3727 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3728 recid = first_rec;
3729 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3730 /* Only standard blocks so there is just one locate record. */
3731 ccw[-1].flags |= CCW_FLAG_CC;
3732 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3733 last_rec - recid + 1, cmd, basedev, blksize);
3734 }
3735 rq_for_each_segment(bv, req, iter) {
3736 dst = page_address(bv.bv_page) + bv.bv_offset;
1da177e4
LT
3737 if (dasd_page_cache) {
3738 char *copy = kmem_cache_alloc(dasd_page_cache,
3739 GFP_DMA | __GFP_NOWARN);
3740 if (copy && rq_data_dir(req) == WRITE)
3741 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
3742 if (copy)
3743 dst = copy + bv.bv_offset;
3744 }
3745 for (off = 0; off < bv.bv_len; off += blksize) {
1da177e4
LT
3746 sector_t trkid = recid;
3747 unsigned int recoffs = sector_div(trkid, blk_per_trk);
3748 rcmd = cmd;
3749 count = blksize;
3750 /* Locate record for cdl special block ? */
3751 if (private->uses_cdl && recid < 2*blk_per_trk) {
3752 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
3753 rcmd |= 0x8;
3754 count = dasd_eckd_cdl_reclen(recid);
ec5883ab
HH
/* short CDL records: pad the read buffer with 0xe5 */
3755 if (count < blksize &&
3756 rq_data_dir(req) == READ)
1da177e4
LT
3757 memset(dst + count, 0xe5,
3758 blksize - count);
3759 }
3760 ccw[-1].flags |= CCW_FLAG_CC;
3761 locate_record(ccw++, LO_data++,
3762 trkid, recoffs + 1,
3763 1, rcmd, basedev, count);
1da177e4
LT
3764 }
3765 /* Locate record for standard blocks ? */
3766 if (private->uses_cdl && recid == 2*blk_per_trk) {
3767 ccw[-1].flags |= CCW_FLAG_CC;
3768 locate_record(ccw++, LO_data++,
3769 trkid, recoffs + 1,
3770 last_rec - recid + 1,
3771 cmd, basedev, count);
1da177e4
LT
3772 }
3773 /* Read/write ccw. */
3774 ccw[-1].flags |= CCW_FLAG_CC;
3775 ccw->cmd_code = rcmd;
3776 ccw->count = count;
3777 if (idal_is_needed(dst, blksize)) {
3778 ccw->cda = (__u32)(addr_t) idaws;
3779 ccw->flags = CCW_FLAG_IDA;
3780 idaws = idal_create_words(idaws, dst, blksize);
3781 } else {
3782 ccw->cda = (__u32)(addr_t) dst;
3783 ccw->flags = 0;
3784 }
3785 ccw++;
3786 dst += blksize;
3787 recid++;
3788 }
3789 }
13de227b
HS
3790 if (blk_noretry_request(req) ||
3791 block->base->features & DASD_FEATURE_FAILFAST)
3792 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
8e09f215
SW
3793 cqr->startdev = startdev;
3794 cqr->memdev = startdev;
3795 cqr->block = block;
3796 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
3797 cqr->lpm = dasd_path_get_ppm(startdev);
3798 cqr->retries = startdev->default_retries;
3799 cqr->buildclk = get_tod_clock();
3800 cqr->status = DASD_CQR_FILLED;
5e2b17e7
JH
3801
3802 /* Set flags to suppress output for expected errors */
3803 if (dasd_eckd_is_ese(basedev)) {
3804 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
3805 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
3806 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
3807 }
3808
1da177e4
LT
3809 return cqr;
3810}
3811
f3eb5384
SW
/*
 * Build a command-mode channel program that transfers whole tracks:
 * one Prefix/LRE CCW followed by a single Read/Write Track Data CCW per
 * track.  Each data CCW is described by an IDA list; every IDAW except
 * the first of a CCW must start on an IDA_BLOCK_SIZE boundary, and an
 * IDA list never crosses a track boundary.
 */
3812 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
3813 struct dasd_device *startdev,
3814 struct dasd_block *block,
3815 struct request *req,
3816 sector_t first_rec,
3817 sector_t last_rec,
3818 sector_t first_trk,
3819 sector_t last_trk,
3820 unsigned int first_offs,
3821 unsigned int last_offs,
3822 unsigned int blk_per_trk,
3823 unsigned int blksize)
3824 {
f3eb5384
SW
3825 unsigned long *idaws;
3826 struct dasd_ccw_req *cqr;
3827 struct ccw1 *ccw;
3828 struct req_iterator iter;
3829 struct bio_vec bv;
f3eb5384
SW
3830 char *dst, *idaw_dst;
3831 unsigned int cidaw, cplength, datasize;
3832 unsigned int tlf;
3833 sector_t recid;
3834 unsigned char cmd;
3835 struct dasd_device *basedev;
3836 unsigned int trkcount, count, count_to_trk_end;
3837 unsigned int idaw_len, seg_len, part_len, len_to_track_end;
3838 unsigned char new_track, end_idaw;
3839 sector_t trkid;
3840 unsigned int recoffs;
3841
3842 basedev = block->base;
f3eb5384
SW
3843 if (rq_data_dir(req) == READ)
3844 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
3845 else if (rq_data_dir(req) == WRITE)
3846 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
3847 else
3848 return ERR_PTR(-EINVAL);
3849
3850 /* Track based I/O needs IDAWs for each page, and not just for
3851 * 64 bit addresses. We need additional idals for pages
3852 * that get filled from two tracks, so we use the number
3853 * of records as upper limit.
3854 */
3855 cidaw = last_rec - first_rec + 1;
3856 trkcount = last_trk - first_trk + 1;
3857
3858 /* 1x prefix + one read/write ccw per track */
3859 cplength = 1 + trkcount;
3860
3861 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
f3eb5384
SW
3862
3863 /* Allocate the ccw request. */
3864 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3865 startdev, blk_mq_rq_to_pdu(req));
f3eb5384
SW
3866 if (IS_ERR(cqr))
3867 return cqr;
3868 ccw = cqr->cpaddr;
3869 /* transfer length factor: how many bytes to read from the last track */
3870 if (first_trk == last_trk)
3871 tlf = last_offs - first_offs + 1;
3872 else
3873 tlf = last_offs + 1;
3874 tlf *= blksize;
3875
3876 if (prefix_LRE(ccw++, cqr->data, first_trk,
3877 last_trk, cmd, basedev, startdev,
3878 1 /* format */, first_offs + 1,
3879 trkcount, blksize,
3880 tlf) == -EAGAIN) {
3881 /* Clock not in sync and XRC is enabled.
3882 * Try again later.
3883 */
3884 dasd_sfree_request(cqr, startdev);
3885 return ERR_PTR(-EAGAIN);
3886 }
3887
3888 /*
3889 * The translation of request into ccw programs must meet the
3890 * following conditions:
3891 * - all idaws but the first and the last must address full pages
3892 * (or 2K blocks on 31-bit)
3893 * - the scope of a ccw and it's idal ends with the track boundaries
3894 */
3895 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
3896 recid = first_rec;
3897 new_track = 1;
3898 end_idaw = 0;
3899 len_to_track_end = 0;
3900 idaw_dst = NULL;
f3eb5384
SW
3901 idaw_len = 0;
3902 rq_for_each_segment(bv, req, iter) {
7988613b
KO
3903 dst = page_address(bv.bv_page) + bv.bv_offset;
3904 seg_len = bv.bv_len;
f3eb5384
SW
3905 while (seg_len) {
3906 if (new_track) {
/* start the data CCW for the next track */
3907 trkid = recid;
3908 recoffs = sector_div(trkid, blk_per_trk);
3909 count_to_trk_end = blk_per_trk - recoffs;
3910 count = min((last_rec - recid + 1),
3911 (sector_t)count_to_trk_end);
3912 len_to_track_end = count * blksize;
3913 ccw[-1].flags |= CCW_FLAG_CC;
3914 ccw->cmd_code = cmd;
3915 ccw->count = len_to_track_end;
3916 ccw->cda = (__u32)(addr_t)idaws;
3917 ccw->flags = CCW_FLAG_IDA;
3918 ccw++;
3919 recid += count;
3920 new_track = 0;
52db45c3
SW
3921 /* first idaw for a ccw may start anywhere */
3922 if (!idaw_dst)
3923 idaw_dst = dst;
3924 }
52db45c3
SW
3925 /* If we start a new idaw, we must make sure that it
3926 * starts on an IDA_BLOCK_SIZE boundary.
f3eb5384
SW
3927 * If we continue an idaw, we must make sure that the
3928 * current segment begins where the so far accumulated
3929 * idaw ends
3930 */
52db45c3
SW
3931 if (!idaw_dst) {
3932 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
3933 dasd_sfree_request(cqr, startdev);
3934 return ERR_PTR(-ERANGE);
3935 } else
3936 idaw_dst = dst;
3937 }
f3eb5384
SW
3938 if ((idaw_dst + idaw_len) != dst) {
3939 dasd_sfree_request(cqr, startdev);
3940 return ERR_PTR(-ERANGE);
3941 }
3942 part_len = min(seg_len, len_to_track_end);
3943 seg_len -= part_len;
3944 dst += part_len;
3945 idaw_len += part_len;
3946 len_to_track_end -= part_len;
3947 /* collected memory area ends on an IDA_BLOCK border,
3948 * -> create an idaw
3949 * idal_create_words will handle cases where idaw_len
3950 * is larger then IDA_BLOCK_SIZE
3951 */
3952 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
3953 end_idaw = 1;
3954 /* We also need to end the idaw at track end */
3955 if (!len_to_track_end) {
3956 new_track = 1;
3957 end_idaw = 1;
3958 }
3959 if (end_idaw) {
3960 idaws = idal_create_words(idaws, idaw_dst,
3961 idaw_len);
3962 idaw_dst = NULL;
f3eb5384
SW
3963 idaw_len = 0;
3964 end_idaw = 0;
3965 }
3966 }
3967 }
3968
3969 if (blk_noretry_request(req) ||
3970 block->base->features & DASD_FEATURE_FAILFAST)
3971 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3972 cqr->startdev = startdev;
3973 cqr->memdev = startdev;
3974 cqr->block = block;
3975 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
3976 cqr->lpm = dasd_path_get_ppm(startdev);
3977 cqr->retries = startdev->default_retries;
3978 cqr->buildclk = get_tod_clock();
3979 cqr->status = DASD_CQR_FILLED;
5e2b17e7
JH
3980
3981 /* Set flags to suppress output for expected errors */
3982 if (dasd_eckd_is_ese(basedev))
3983 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
3984
f3eb5384
SW
3985 return cqr;
3986}
3987
3988static int prepare_itcw(struct itcw *itcw,
3989 unsigned int trk, unsigned int totrk, int cmd,
3990 struct dasd_device *basedev,
3991 struct dasd_device *startdev,
3992 unsigned int rec_on_trk, int count,
3993 unsigned int blksize,
3994 unsigned int total_data_size,
3995 unsigned int tlf,
3996 unsigned int blk_per_trk)
3997{
3998 struct PFX_eckd_data pfxdata;
3999 struct dasd_eckd_private *basepriv, *startpriv;
4000 struct DE_eckd_data *dedata;
4001 struct LRE_eckd_data *lredata;
4002 struct dcw *dcw;
4003
4004 u32 begcyl, endcyl;
4005 u16 heads, beghead, endhead;
4006 u8 pfx_cmd;
4007
4008 int rc = 0;
4009 int sector = 0;
4010 int dn, d;
4011
4012
4013 /* setup prefix data */
543691a4
SO
4014 basepriv = basedev->private;
4015 startpriv = startdev->private;
f3eb5384
SW
4016 dedata = &pfxdata.define_extent;
4017 lredata = &pfxdata.locate_record;
4018
4019 memset(&pfxdata, 0, sizeof(pfxdata));
4020 pfxdata.format = 1; /* PFX with LRE */
4021 pfxdata.base_address = basepriv->ned->unit_addr;
4022 pfxdata.base_lss = basepriv->ned->ID;
4023 pfxdata.validity.define_extent = 1;
4024
4025 /* private uid is kept up to date, conf_data may be outdated */
da340f92
SH
4026 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4027 pfxdata.validity.verify_base = 1;
4028
4029 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
f3eb5384 4030 pfxdata.validity.verify_base = 1;
da340f92 4031 pfxdata.validity.hyper_pav = 1;
f3eb5384
SW
4032 }
4033
4034 switch (cmd) {
4035 case DASD_ECKD_CCW_READ_TRACK_DATA:
4036 dedata->mask.perm = 0x1;
4037 dedata->attributes.operation = basepriv->attrib.operation;
4038 dedata->blk_size = blksize;
4039 dedata->ga_extended |= 0x42;
4040 lredata->operation.orientation = 0x0;
4041 lredata->operation.operation = 0x0C;
4042 lredata->auxiliary.check_bytes = 0x01;
4043 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4044 break;
4045 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4046 dedata->mask.perm = 0x02;
4047 dedata->attributes.operation = basepriv->attrib.operation;
4048 dedata->blk_size = blksize;
5628683c 4049 rc = set_timestamp(NULL, dedata, basedev);
f3eb5384
SW
4050 dedata->ga_extended |= 0x42;
4051 lredata->operation.orientation = 0x0;
4052 lredata->operation.operation = 0x3F;
4053 lredata->extended_operation = 0x23;
4054 lredata->auxiliary.check_bytes = 0x2;
45f186be
JH
4055 /*
4056 * If XRC is supported the System Time Stamp is set. The
4057 * validity of the time stamp must be reflected in the prefix
4058 * data as well.
4059 */
4060 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4061 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
f3eb5384
SW
4062 pfx_cmd = DASD_ECKD_CCW_PFX;
4063 break;
8fd57520
JH
4064 case DASD_ECKD_CCW_READ_COUNT_MT:
4065 dedata->mask.perm = 0x1;
4066 dedata->attributes.operation = DASD_BYPASS_CACHE;
4067 dedata->ga_extended |= 0x42;
4068 dedata->blk_size = blksize;
4069 lredata->operation.orientation = 0x2;
4070 lredata->operation.operation = 0x16;
4071 lredata->auxiliary.check_bytes = 0x01;
4072 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4073 break;
f3eb5384
SW
4074 default:
4075 DBF_DEV_EVENT(DBF_ERR, basedev,
4076 "prepare itcw, unknown opcode 0x%x", cmd);
4077 BUG();
4078 break;
4079 }
4080 if (rc)
4081 return rc;
4082
4083 dedata->attributes.mode = 0x3; /* ECKD */
4084
4085 heads = basepriv->rdc_data.trk_per_cyl;
4086 begcyl = trk / heads;
4087 beghead = trk % heads;
4088 endcyl = totrk / heads;
4089 endhead = totrk % heads;
4090
4091 /* check for sequential prestage - enhance cylinder range */
4092 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4093 dedata->attributes.operation == DASD_SEQ_ACCESS) {
4094
4095 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4096 endcyl += basepriv->attrib.nr_cyl;
4097 else
4098 endcyl = (basepriv->real_cyl - 1);
4099 }
4100
4101 set_ch_t(&dedata->beg_ext, begcyl, beghead);
4102 set_ch_t(&dedata->end_ext, endcyl, endhead);
4103
4104 dedata->ep_format = 0x20; /* records per track is valid */
4105 dedata->ep_rec_per_track = blk_per_trk;
4106
4107 if (rec_on_trk) {
4108 switch (basepriv->rdc_data.dev_type) {
4109 case 0x3390:
4110 dn = ceil_quot(blksize + 6, 232);
4111 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4112 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4113 break;
4114 case 0x3380:
4115 d = 7 + ceil_quot(blksize + 12, 32);
4116 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
4117 break;
4118 }
4119 }
4120
8fd57520
JH
4121 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4122 lredata->auxiliary.length_valid = 0;
4123 lredata->auxiliary.length_scope = 0;
4124 lredata->sector = 0xff;
4125 } else {
4126 lredata->auxiliary.length_valid = 1;
4127 lredata->auxiliary.length_scope = 1;
4128 lredata->sector = sector;
4129 }
f3eb5384
SW
4130 lredata->auxiliary.imbedded_ccw_valid = 1;
4131 lredata->length = tlf;
4132 lredata->imbedded_ccw = cmd;
4133 lredata->count = count;
f3eb5384
SW
4134 set_ch_t(&lredata->seek_addr, begcyl, beghead);
4135 lredata->search_arg.cyl = lredata->seek_addr.cyl;
4136 lredata->search_arg.head = lredata->seek_addr.head;
4137 lredata->search_arg.record = rec_on_trk;
4138
4139 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4140 &pfxdata, sizeof(pfxdata), total_data_size);
757853ea 4141 return PTR_ERR_OR_ZERO(dcw);
f3eb5384
SW
4142}
4143
4144static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4145 struct dasd_device *startdev,
4146 struct dasd_block *block,
4147 struct request *req,
4148 sector_t first_rec,
4149 sector_t last_rec,
4150 sector_t first_trk,
4151 sector_t last_trk,
4152 unsigned int first_offs,
4153 unsigned int last_offs,
4154 unsigned int blk_per_trk,
4155 unsigned int blksize)
4156{
f3eb5384
SW
4157 struct dasd_ccw_req *cqr;
4158 struct req_iterator iter;
7988613b 4159 struct bio_vec bv;
f3eb5384
SW
4160 char *dst;
4161 unsigned int trkcount, ctidaw;
4162 unsigned char cmd;
4163 struct dasd_device *basedev;
4164 unsigned int tlf;
4165 struct itcw *itcw;
4166 struct tidaw *last_tidaw = NULL;
4167 int itcw_op;
4168 size_t itcw_size;
ef19298b
SW
4169 u8 tidaw_flags;
4170 unsigned int seg_len, part_len, len_to_track_end;
4171 unsigned char new_track;
4172 sector_t recid, trkid;
4173 unsigned int offs;
4174 unsigned int count, count_to_trk_end;
cd10502b 4175 int ret;
f3eb5384
SW
4176
4177 basedev = block->base;
f3eb5384
SW
4178 if (rq_data_dir(req) == READ) {
4179 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4180 itcw_op = ITCW_OP_READ;
4181 } else if (rq_data_dir(req) == WRITE) {
4182 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4183 itcw_op = ITCW_OP_WRITE;
4184 } else
4185 return ERR_PTR(-EINVAL);
4186
4187 /* trackbased I/O needs address all memory via TIDAWs,
4188 * not just for 64 bit addresses. This allows us to map
4189 * each segment directly to one tidaw.
ef19298b
SW
4190 * In the case of write requests, additional tidaws may
4191 * be needed when a segment crosses a track boundary.
f3eb5384
SW
4192 */
4193 trkcount = last_trk - first_trk + 1;
4194 ctidaw = 0;
4195 rq_for_each_segment(bv, req, iter) {
4196 ++ctidaw;
4197 }
ef19298b
SW
4198 if (rq_data_dir(req) == WRITE)
4199 ctidaw += (last_trk - first_trk);
f3eb5384
SW
4200
4201 /* Allocate the ccw request. */
4202 itcw_size = itcw_calc_size(0, ctidaw, 0);
c5205f2f
SO
4203 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4204 blk_mq_rq_to_pdu(req));
f3eb5384
SW
4205 if (IS_ERR(cqr))
4206 return cqr;
4207
f3eb5384
SW
4208 /* transfer length factor: how many bytes to read from the last track */
4209 if (first_trk == last_trk)
4210 tlf = last_offs - first_offs + 1;
4211 else
4212 tlf = last_offs + 1;
4213 tlf *= blksize;
4214
4215 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
ef19298b 4216 if (IS_ERR(itcw)) {
cd10502b
JL
4217 ret = -EINVAL;
4218 goto out_error;
ef19298b 4219 }
f3eb5384 4220 cqr->cpaddr = itcw_get_tcw(itcw);
f3eb5384
SW
4221 if (prepare_itcw(itcw, first_trk, last_trk,
4222 cmd, basedev, startdev,
4223 first_offs + 1,
4224 trkcount, blksize,
4225 (last_rec - first_rec + 1) * blksize,
4226 tlf, blk_per_trk) == -EAGAIN) {
4227 /* Clock not in sync and XRC is enabled.
4228 * Try again later.
4229 */
cd10502b
JL
4230 ret = -EAGAIN;
4231 goto out_error;
f3eb5384 4232 }
d54cddb6 4233 len_to_track_end = 0;
f3eb5384
SW
4234 /*
4235 * A tidaw can address 4k of memory, but must not cross page boundaries
4236 * We can let the block layer handle this by setting
4237 * blk_queue_segment_boundary to page boundaries and
4238 * blk_max_segment_size to page size when setting up the request queue.
ef19298b
SW
4239 * For write requests, a TIDAW must not cross track boundaries, because
4240 * we have to set the CBC flag on the last tidaw for each track.
f3eb5384 4241 */
ef19298b
SW
4242 if (rq_data_dir(req) == WRITE) {
4243 new_track = 1;
4244 recid = first_rec;
4245 rq_for_each_segment(bv, req, iter) {
7988613b
KO
4246 dst = page_address(bv.bv_page) + bv.bv_offset;
4247 seg_len = bv.bv_len;
ef19298b
SW
4248 while (seg_len) {
4249 if (new_track) {
4250 trkid = recid;
4251 offs = sector_div(trkid, blk_per_trk);
4252 count_to_trk_end = blk_per_trk - offs;
4253 count = min((last_rec - recid + 1),
4254 (sector_t)count_to_trk_end);
4255 len_to_track_end = count * blksize;
4256 recid += count;
4257 new_track = 0;
4258 }
4259 part_len = min(seg_len, len_to_track_end);
4260 seg_len -= part_len;
4261 len_to_track_end -= part_len;
4262 /* We need to end the tidaw at track end */
4263 if (!len_to_track_end) {
4264 new_track = 1;
4265 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4266 } else
4267 tidaw_flags = 0;
4268 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4269 dst, part_len);
cd10502b
JL
4270 if (IS_ERR(last_tidaw)) {
4271 ret = -EINVAL;
4272 goto out_error;
4273 }
ef19298b
SW
4274 dst += part_len;
4275 }
4276 }
4277 } else {
4278 rq_for_each_segment(bv, req, iter) {
7988613b 4279 dst = page_address(bv.bv_page) + bv.bv_offset;
ef19298b 4280 last_tidaw = itcw_add_tidaw(itcw, 0x00,
7988613b 4281 dst, bv.bv_len);
cd10502b
JL
4282 if (IS_ERR(last_tidaw)) {
4283 ret = -EINVAL;
4284 goto out_error;
4285 }
ef19298b 4286 }
f3eb5384 4287 }
ef19298b
SW
4288 last_tidaw->flags |= TIDAW_FLAGS_LAST;
4289 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
f3eb5384
SW
4290 itcw_finalize(itcw);
4291
4292 if (blk_noretry_request(req) ||
4293 block->base->features & DASD_FEATURE_FAILFAST)
4294 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
ef19298b 4295 cqr->cpmode = 1;
f3eb5384
SW
4296 cqr->startdev = startdev;
4297 cqr->memdev = startdev;
4298 cqr->block = block;
7c8faa86 4299 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
c9346151 4300 cqr->lpm = dasd_path_get_ppm(startdev);
1f1ee9ad 4301 cqr->retries = startdev->default_retries;
1aae0560 4302 cqr->buildclk = get_tod_clock();
f3eb5384 4303 cqr->status = DASD_CQR_FILLED;
5e2b17e7
JH
4304
4305 /* Set flags to suppress output for expected errors */
4306 if (dasd_eckd_is_ese(basedev)) {
4307 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4308 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4309 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4310 }
4311
f3eb5384 4312 return cqr;
cd10502b
JL
4313out_error:
4314 dasd_sfree_request(cqr, startdev);
4315 return ERR_PTR(ret);
f3eb5384
SW
4316}
4317
4318static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4319 struct dasd_block *block,
4320 struct request *req)
4321{
ef19298b 4322 int cmdrtd, cmdwtd;
f3eb5384 4323 int use_prefix;
ef19298b 4324 int fcx_multitrack;
45b44d76 4325 struct dasd_eckd_private *private;
f3eb5384
SW
4326 struct dasd_device *basedev;
4327 sector_t first_rec, last_rec;
4328 sector_t first_trk, last_trk;
4329 unsigned int first_offs, last_offs;
4330 unsigned int blk_per_trk, blksize;
4331 int cdlspecial;
ef19298b 4332 unsigned int data_size;
f3eb5384
SW
4333 struct dasd_ccw_req *cqr;
4334
4335 basedev = block->base;
543691a4 4336 private = basedev->private;
f3eb5384
SW
4337
4338 /* Calculate number of blocks/records per track. */
4339 blksize = block->bp_block;
4340 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
6fca97a9
SH
4341 if (blk_per_trk == 0)
4342 return ERR_PTR(-EINVAL);
f3eb5384 4343 /* Calculate record id of first and last block. */
83096ebf 4344 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
f3eb5384
SW
4345 first_offs = sector_div(first_trk, blk_per_trk);
4346 last_rec = last_trk =
83096ebf 4347 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
f3eb5384
SW
4348 last_offs = sector_div(last_trk, blk_per_trk);
4349 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4350
ef19298b
SW
4351 fcx_multitrack = private->features.feature[40] & 0x20;
4352 data_size = blk_rq_bytes(req);
26a35f37
SW
4353 if (data_size % blksize)
4354 return ERR_PTR(-EINVAL);
ef19298b
SW
4355 /* tpm write request add CBC data on each track boundary */
4356 if (rq_data_dir(req) == WRITE)
4357 data_size += (last_trk - first_trk) * 4;
f3eb5384
SW
4358
4359 /* is read track data and write track data in command mode supported? */
4360 cmdrtd = private->features.feature[9] & 0x20;
4361 cmdwtd = private->features.feature[12] & 0x40;
4362 use_prefix = private->features.feature[8] & 0x01;
4363
4364 cqr = NULL;
4365 if (cdlspecial || dasd_page_cache) {
4366 /* do nothing, just fall through to the cmd mode single case */
ef19298b
SW
4367 } else if ((data_size <= private->fcx_max_data)
4368 && (fcx_multitrack || (first_trk == last_trk))) {
f3eb5384
SW
4369 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4370 first_rec, last_rec,
4371 first_trk, last_trk,
4372 first_offs, last_offs,
4373 blk_per_trk, blksize);
ef19298b
SW
4374 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4375 (PTR_ERR(cqr) != -ENOMEM))
f3eb5384
SW
4376 cqr = NULL;
4377 } else if (use_prefix &&
4378 (((rq_data_dir(req) == READ) && cmdrtd) ||
4379 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4380 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4381 first_rec, last_rec,
4382 first_trk, last_trk,
4383 first_offs, last_offs,
4384 blk_per_trk, blksize);
ef19298b
SW
4385 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4386 (PTR_ERR(cqr) != -ENOMEM))
f3eb5384
SW
4387 cqr = NULL;
4388 }
4389 if (!cqr)
4390 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4391 first_rec, last_rec,
4392 first_trk, last_trk,
4393 first_offs, last_offs,
4394 blk_per_trk, blksize);
4395 return cqr;
4396}
4397
bbc7f7ea
JH
4398static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4399 struct dasd_block *block,
4400 struct request *req)
e4dbb0f2 4401{
9d2be0c1
JH
4402 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4403 unsigned int seg_len, len_to_track_end;
4404 unsigned int cidaw, cplength, datasize;
4405 sector_t first_trk, last_trk, sectors;
4406 struct dasd_eckd_private *base_priv;
e4dbb0f2 4407 struct dasd_device *basedev;
e4dbb0f2 4408 struct req_iterator iter;
9d2be0c1
JH
4409 struct dasd_ccw_req *cqr;
4410 unsigned int first_offs;
4411 unsigned int trkcount;
4412 unsigned long *idaws;
4413 unsigned int size;
4414 unsigned char cmd;
7988613b 4415 struct bio_vec bv;
9d2be0c1
JH
4416 struct ccw1 *ccw;
4417 int use_prefix;
4418 void *data;
e4dbb0f2 4419 char *dst;
e4dbb0f2
SH
4420
4421 /*
4422 * raw track access needs to be mutiple of 64k and on 64k boundary
558b9ef0
SW
4423 * For read requests we can fix an incorrect alignment by padding
4424 * the request with dummy pages.
e4dbb0f2 4425 */
558b9ef0
SW
4426 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4427 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4428 DASD_RAW_SECTORS_PER_TRACK;
4429 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4430 DASD_RAW_SECTORS_PER_TRACK;
4431 basedev = block->base;
4432 if ((start_padding_sectors || end_padding_sectors) &&
4433 (rq_data_dir(req) == WRITE)) {
4434 DBF_DEV_EVENT(DBF_ERR, basedev,
e78c21d1 4435 "raw write not track aligned (%llu,%llu) req %p",
558b9ef0 4436 start_padding_sectors, end_padding_sectors, req);
9d2be0c1 4437 return ERR_PTR(-EINVAL);
e4dbb0f2
SH
4438 }
4439
4440 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4441 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4442 DASD_RAW_SECTORS_PER_TRACK;
4443 trkcount = last_trk - first_trk + 1;
4444 first_offs = 0;
e4dbb0f2
SH
4445
4446 if (rq_data_dir(req) == READ)
4447 cmd = DASD_ECKD_CCW_READ_TRACK;
4448 else if (rq_data_dir(req) == WRITE)
4449 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
9d2be0c1
JH
4450 else
4451 return ERR_PTR(-EINVAL);
e4dbb0f2
SH
4452
4453 /*
4454 * Raw track based I/O needs IDAWs for each page,
4455 * and not just for 64 bit addresses.
4456 */
4457 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
4458
e4dbb0f2 4459 /*
9d2be0c1
JH
4460 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4461 * of extended parameter. This is needed for write full track.
e4dbb0f2 4462 */
9d2be0c1
JH
4463 base_priv = basedev->private;
4464 use_prefix = base_priv->features.feature[8] & 0x01;
4465 if (use_prefix) {
4466 cplength = 1 + trkcount;
4467 size = sizeof(struct PFX_eckd_data) + 2;
4468 } else {
4469 cplength = 2 + trkcount;
4470 size = sizeof(struct DE_eckd_data) +
4471 sizeof(struct LRE_eckd_data) + 2;
4472 }
4473 size = ALIGN(size, 8);
e4dbb0f2 4474
7bf76f01 4475 datasize = size + cidaw * sizeof(unsigned long);
e4dbb0f2
SH
4476
4477 /* Allocate the ccw request. */
4478 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
c5205f2f 4479 datasize, startdev, blk_mq_rq_to_pdu(req));
e4dbb0f2 4480 if (IS_ERR(cqr))
9d2be0c1
JH
4481 return cqr;
4482
e4dbb0f2 4483 ccw = cqr->cpaddr;
9d2be0c1 4484 data = cqr->data;
e4dbb0f2 4485
9d2be0c1
JH
4486 if (use_prefix) {
4487 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4488 startdev, 1, first_offs + 1, trkcount, 0, 0);
4489 } else {
4490 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4491 ccw[-1].flags |= CCW_FLAG_CC;
4492
4493 data += sizeof(struct DE_eckd_data);
4494 locate_record_ext(ccw++, data, first_trk, first_offs + 1,
4495 trkcount, cmd, basedev, 0, 0);
e4dbb0f2
SH
4496 }
4497
9d2be0c1 4498 idaws = (unsigned long *)(cqr->data + size);
e4dbb0f2 4499 len_to_track_end = 0;
558b9ef0
SW
4500 if (start_padding_sectors) {
4501 ccw[-1].flags |= CCW_FLAG_CC;
4502 ccw->cmd_code = cmd;
4503 /* maximum 3390 track size */
4504 ccw->count = 57326;
4505 /* 64k map to one track */
4506 len_to_track_end = 65536 - start_padding_sectors * 512;
4507 ccw->cda = (__u32)(addr_t)idaws;
4508 ccw->flags |= CCW_FLAG_IDA;
4509 ccw->flags |= CCW_FLAG_SLI;
4510 ccw++;
4511 for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4512 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4513 }
e4dbb0f2 4514 rq_for_each_segment(bv, req, iter) {
7988613b
KO
4515 dst = page_address(bv.bv_page) + bv.bv_offset;
4516 seg_len = bv.bv_len;
558b9ef0
SW
4517 if (cmd == DASD_ECKD_CCW_READ_TRACK)
4518 memset(dst, 0, seg_len);
e4dbb0f2
SH
4519 if (!len_to_track_end) {
4520 ccw[-1].flags |= CCW_FLAG_CC;
4521 ccw->cmd_code = cmd;
4522 /* maximum 3390 track size */
4523 ccw->count = 57326;
4524 /* 64k map to one track */
4525 len_to_track_end = 65536;
4526 ccw->cda = (__u32)(addr_t)idaws;
4527 ccw->flags |= CCW_FLAG_IDA;
4528 ccw->flags |= CCW_FLAG_SLI;
4529 ccw++;
4530 }
4531 len_to_track_end -= seg_len;
4532 idaws = idal_create_words(idaws, dst, seg_len);
4533 }
558b9ef0
SW
4534 for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4535 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
e4dbb0f2
SH
4536 if (blk_noretry_request(req) ||
4537 block->base->features & DASD_FEATURE_FAILFAST)
4538 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4539 cqr->startdev = startdev;
4540 cqr->memdev = startdev;
4541 cqr->block = block;
4542 cqr->expires = startdev->default_expires * HZ;
c9346151 4543 cqr->lpm = dasd_path_get_ppm(startdev);
1f1ee9ad 4544 cqr->retries = startdev->default_retries;
1aae0560 4545 cqr->buildclk = get_tod_clock();
e4dbb0f2
SH
4546 cqr->status = DASD_CQR_FILLED;
4547
e4dbb0f2
SH
4548 return cqr;
4549}
4550
4551
1da177e4
LT
4552static int
4553dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4554{
4555 struct dasd_eckd_private *private;
4556 struct ccw1 *ccw;
5705f702 4557 struct req_iterator iter;
7988613b 4558 struct bio_vec bv;
1da177e4
LT
4559 char *dst, *cda;
4560 unsigned int blksize, blk_per_trk, off;
4561 sector_t recid;
5705f702 4562 int status;
1da177e4
LT
4563
4564 if (!dasd_page_cache)
4565 goto out;
543691a4 4566 private = cqr->block->base->private;
8e09f215 4567 blksize = cqr->block->bp_block;
1da177e4 4568 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
83096ebf 4569 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
1da177e4
LT
4570 ccw = cqr->cpaddr;
4571 /* Skip over define extent & locate record. */
4572 ccw++;
4573 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4574 ccw++;
5705f702 4575 rq_for_each_segment(bv, req, iter) {
7988613b
KO
4576 dst = page_address(bv.bv_page) + bv.bv_offset;
4577 for (off = 0; off < bv.bv_len; off += blksize) {
1da177e4
LT
4578 /* Skip locate record. */
4579 if (private->uses_cdl && recid <= 2*blk_per_trk)
4580 ccw++;
4581 if (dst) {
4582 if (ccw->flags & CCW_FLAG_IDA)
4583 cda = *((char **)((addr_t) ccw->cda));
4584 else
4585 cda = (char *)((addr_t) ccw->cda);
4586 if (dst != cda) {
4587 if (rq_data_dir(req) == READ)
7988613b 4588 memcpy(dst, cda, bv.bv_len);
1da177e4
LT
4589 kmem_cache_free(dasd_page_cache,
4590 (void *)((addr_t)cda & PAGE_MASK));
4591 }
4592 dst = NULL;
4593 }
4594 ccw++;
4595 recid++;
4596 }
4597 }
4598out:
4599 status = cqr->status == DASD_CQR_DONE;
8e09f215 4600 dasd_sfree_request(cqr, cqr->memdev);
1da177e4
LT
4601 return status;
4602}
4603
8e09f215 4604/*
f3eb5384 4605 * Modify ccw/tcw in cqr so it can be started on a base device.
8e09f215
SW
4606 *
4607 * Note that this is not enough to restart the cqr!
4608 * Either reset cqr->startdev as well (summary unit check handling)
4609 * or restart via separate cqr (as in ERP handling).
4610 */
4611void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4612{
4613 struct ccw1 *ccw;
4614 struct PFX_eckd_data *pfxdata;
f3eb5384
SW
4615 struct tcw *tcw;
4616 struct tccb *tccb;
4617 struct dcw *dcw;
4618
4619 if (cqr->cpmode == 1) {
4620 tcw = cqr->cpaddr;
4621 tccb = tcw_get_tccb(tcw);
4622 dcw = (struct dcw *)&tccb->tca[0];
4623 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
8e09f215
SW
4624 pfxdata->validity.verify_base = 0;
4625 pfxdata->validity.hyper_pav = 0;
f3eb5384
SW
4626 } else {
4627 ccw = cqr->cpaddr;
4628 pfxdata = cqr->data;
4629 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4630 pfxdata->validity.verify_base = 0;
4631 pfxdata->validity.hyper_pav = 0;
4632 }
8e09f215
SW
4633 }
4634}
4635
4636#define DASD_ECKD_CHANQ_MAX_SIZE 4
4637
4638static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4639 struct dasd_block *block,
4640 struct request *req)
4641{
4642 struct dasd_eckd_private *private;
4643 struct dasd_device *startdev;
4644 unsigned long flags;
4645 struct dasd_ccw_req *cqr;
4646
4647 startdev = dasd_alias_get_start_dev(base);
4648 if (!startdev)
4649 startdev = base;
543691a4 4650 private = startdev->private;
8e09f215
SW
4651 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4652 return ERR_PTR(-EBUSY);
4653
4654 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4655 private->count++;
e4dbb0f2 4656 if ((base->features & DASD_FEATURE_USERAW))
bbc7f7ea 4657 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
e4dbb0f2
SH
4658 else
4659 cqr = dasd_eckd_build_cp(startdev, block, req);
8e09f215
SW
4660 if (IS_ERR(cqr))
4661 private->count--;
4662 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4663 return cqr;
4664}
4665
4666static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4667 struct request *req)
4668{
4669 struct dasd_eckd_private *private;
4670 unsigned long flags;
4671
4672 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
543691a4 4673 private = cqr->memdev->private;
8e09f215
SW
4674 private->count--;
4675 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4676 return dasd_eckd_free_cp(cqr, req);
4677}
4678
1da177e4
LT
4679static int
4680dasd_eckd_fill_info(struct dasd_device * device,
4681 struct dasd_information2_t * info)
4682{
543691a4 4683 struct dasd_eckd_private *private = device->private;
1da177e4 4684
1da177e4
LT
4685 info->label_block = 2;
4686 info->FBA_layout = private->uses_cdl ? 0 : 1;
4687 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
543691a4 4688 info->characteristics_size = sizeof(private->rdc_data);
1da177e4 4689 memcpy(info->characteristics, &private->rdc_data,
543691a4 4690 sizeof(private->rdc_data));
4abb08c2
SW
4691 info->confdata_size = min((unsigned long)private->conf_len,
4692 sizeof(info->configuration_data));
4693 memcpy(info->configuration_data, private->conf_data,
4694 info->confdata_size);
1da177e4
LT
4695 return 0;
4696}
4697
4698/*
4699 * SECTION: ioctl functions for eckd devices.
4700 */
4701
4702/*
4703 * Release device ioctl.
 * Builds a channel program to release a prior reserved
1da177e4
LT
4705 * (see dasd_eckd_reserve) device.
4706 */
4707static int
1107ccfb 4708dasd_eckd_release(struct dasd_device *device)
1da177e4 4709{
1da177e4
LT
4710 struct dasd_ccw_req *cqr;
4711 int rc;
f3eb5384 4712 struct ccw1 *ccw;
f932bcea 4713 int useglobal;
1da177e4
LT
4714
4715 if (!capable(CAP_SYS_ADMIN))
4716 return -EACCES;
4717
f932bcea 4718 useglobal = 0;
c5205f2f 4719 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
1da177e4 4720 if (IS_ERR(cqr)) {
f932bcea
SW
4721 mutex_lock(&dasd_reserve_mutex);
4722 useglobal = 1;
4723 cqr = &dasd_reserve_req->cqr;
4724 memset(cqr, 0, sizeof(*cqr));
4725 memset(&dasd_reserve_req->ccw, 0,
4726 sizeof(dasd_reserve_req->ccw));
4727 cqr->cpaddr = &dasd_reserve_req->ccw;
4728 cqr->data = &dasd_reserve_req->data;
4729 cqr->magic = DASD_ECKD_MAGIC;
1da177e4 4730 }
f3eb5384
SW
4731 ccw = cqr->cpaddr;
4732 ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4733 ccw->flags |= CCW_FLAG_SLI;
4734 ccw->count = 32;
4735 ccw->cda = (__u32)(addr_t) cqr->data;
8e09f215
SW
4736 cqr->startdev = device;
4737 cqr->memdev = device;
1da177e4 4738 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1c01b8a5 4739 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
336c340b 4740 cqr->retries = 2; /* set retry counter to enable basic ERP */
1da177e4 4741 cqr->expires = 2 * HZ;
1aae0560 4742 cqr->buildclk = get_tod_clock();
1da177e4
LT
4743 cqr->status = DASD_CQR_FILLED;
4744
4745 rc = dasd_sleep_on_immediatly(cqr);
5a27e60d
SW
4746 if (!rc)
4747 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
1da177e4 4748
f932bcea
SW
4749 if (useglobal)
4750 mutex_unlock(&dasd_reserve_mutex);
4751 else
4752 dasd_sfree_request(cqr, cqr->memdev);
1da177e4
LT
4753 return rc;
4754}
4755
4756/*
4757 * Reserve device ioctl.
4758 * Options are set to 'synchronous wait for interrupt' and
138c014d
HH
4759 * 'timeout the request'. This leads to a terminate IO if
4760 * the interrupt is outstanding for a certain time.
1da177e4
LT
4761 */
4762static int
1107ccfb 4763dasd_eckd_reserve(struct dasd_device *device)
1da177e4 4764{
1da177e4
LT
4765 struct dasd_ccw_req *cqr;
4766 int rc;
f3eb5384 4767 struct ccw1 *ccw;
f932bcea 4768 int useglobal;
1da177e4
LT
4769
4770 if (!capable(CAP_SYS_ADMIN))
4771 return -EACCES;
4772
f932bcea 4773 useglobal = 0;
c5205f2f 4774 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
1da177e4 4775 if (IS_ERR(cqr)) {
f932bcea
SW
4776 mutex_lock(&dasd_reserve_mutex);
4777 useglobal = 1;
4778 cqr = &dasd_reserve_req->cqr;
4779 memset(cqr, 0, sizeof(*cqr));
4780 memset(&dasd_reserve_req->ccw, 0,
4781 sizeof(dasd_reserve_req->ccw));
4782 cqr->cpaddr = &dasd_reserve_req->ccw;
4783 cqr->data = &dasd_reserve_req->data;
4784 cqr->magic = DASD_ECKD_MAGIC;
1da177e4 4785 }
f3eb5384
SW
4786 ccw = cqr->cpaddr;
4787 ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
4788 ccw->flags |= CCW_FLAG_SLI;
4789 ccw->count = 32;
4790 ccw->cda = (__u32)(addr_t) cqr->data;
8e09f215
SW
4791 cqr->startdev = device;
4792 cqr->memdev = device;
1da177e4 4793 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1c01b8a5 4794 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
336c340b 4795 cqr->retries = 2; /* set retry counter to enable basic ERP */
1da177e4 4796 cqr->expires = 2 * HZ;
1aae0560 4797 cqr->buildclk = get_tod_clock();
1da177e4
LT
4798 cqr->status = DASD_CQR_FILLED;
4799
4800 rc = dasd_sleep_on_immediatly(cqr);
5a27e60d
SW
4801 if (!rc)
4802 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
1da177e4 4803
f932bcea
SW
4804 if (useglobal)
4805 mutex_unlock(&dasd_reserve_mutex);
4806 else
4807 dasd_sfree_request(cqr, cqr->memdev);
1da177e4
LT
4808 return rc;
4809}
4810
4811/*
4812 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
1da177e4
LT
4814 * (unconditional reserve)
4815 */
4816static int
1107ccfb 4817dasd_eckd_steal_lock(struct dasd_device *device)
1da177e4 4818{
1da177e4
LT
4819 struct dasd_ccw_req *cqr;
4820 int rc;
f3eb5384 4821 struct ccw1 *ccw;
f932bcea 4822 int useglobal;
1da177e4
LT
4823
4824 if (!capable(CAP_SYS_ADMIN))
4825 return -EACCES;
4826
f932bcea 4827 useglobal = 0;
c5205f2f 4828 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
1da177e4 4829 if (IS_ERR(cqr)) {
f932bcea
SW
4830 mutex_lock(&dasd_reserve_mutex);
4831 useglobal = 1;
4832 cqr = &dasd_reserve_req->cqr;
4833 memset(cqr, 0, sizeof(*cqr));
4834 memset(&dasd_reserve_req->ccw, 0,
4835 sizeof(dasd_reserve_req->ccw));
4836 cqr->cpaddr = &dasd_reserve_req->ccw;
4837 cqr->data = &dasd_reserve_req->data;
4838 cqr->magic = DASD_ECKD_MAGIC;
1da177e4 4839 }
f3eb5384
SW
4840 ccw = cqr->cpaddr;
4841 ccw->cmd_code = DASD_ECKD_CCW_SLCK;
4842 ccw->flags |= CCW_FLAG_SLI;
4843 ccw->count = 32;
4844 ccw->cda = (__u32)(addr_t) cqr->data;
8e09f215
SW
4845 cqr->startdev = device;
4846 cqr->memdev = device;
1da177e4 4847 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1c01b8a5 4848 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
336c340b 4849 cqr->retries = 2; /* set retry counter to enable basic ERP */
1da177e4 4850 cqr->expires = 2 * HZ;
1aae0560 4851 cqr->buildclk = get_tod_clock();
1da177e4
LT
4852 cqr->status = DASD_CQR_FILLED;
4853
4854 rc = dasd_sleep_on_immediatly(cqr);
5a27e60d
SW
4855 if (!rc)
4856 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
1da177e4 4857
f932bcea
SW
4858 if (useglobal)
4859 mutex_unlock(&dasd_reserve_mutex);
4860 else
4861 dasd_sfree_request(cqr, cqr->memdev);
1da177e4
LT
4862 return rc;
4863}
4864
196339f1
SW
/*
 * SNID - Sense Path Group ID
 * This ioctl may be used in situations where I/O is stalled due to
 * a reserve, so if the normal dasd_smalloc_request fails, we use the
 * preallocated dasd_reserve_req.
 */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		/*
		 * Allocation failed (possibly because I/O is stalled by a
		 * reserve): fall back to the statically preallocated
		 * request, serialized by dasd_reserve_mutex.
		 */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	/* build the single SNID CCW */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 12;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	/* allow this request to proceed even while the device is locked */
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* restrict the request to the caller-supplied path mask (0 = any) */
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
4933
1da177e4
LT
/*
 * Read performance statistics
 *
 * Builds a two-CCW channel program (PSF to request the data, RSSD to
 * read it back) and copies the resulting performance statistics to the
 * user buffer at @argp.
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* the request data area holds the PSF parameters and the result */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;	/* no retries for this diagnostic request */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the following RSSD */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
4996
4997/*
4998 * Get attributes (cache operations)
4999 * Returnes the cache attributes used in Define Extend (DE).
5000 */
5001static int
1107ccfb 5002dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
1da177e4 5003{
543691a4 5004 struct dasd_eckd_private *private = device->private;
1107ccfb 5005 struct attrib_data_t attrib = private->attrib;
1da177e4
LT
5006 int rc;
5007
5008 if (!capable(CAP_SYS_ADMIN))
5009 return -EACCES;
1107ccfb 5010 if (!argp)
1da177e4
LT
5011 return -EINVAL;
5012
1107ccfb
CH
5013 rc = 0;
5014 if (copy_to_user(argp, (long *) &attrib,
8e09f215 5015 sizeof(struct attrib_data_t)))
1107ccfb 5016 rc = -EFAULT;
1da177e4
LT
5017
5018 return rc;
5019}
5020
5021/*
5022 * Set attributes (cache operations)
5023 * Stores the attributes for cache operation to be used in Define Extend (DE).
5024 */
5025static int
1107ccfb 5026dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
1da177e4 5027{
543691a4 5028 struct dasd_eckd_private *private = device->private;
1da177e4
LT
5029 struct attrib_data_t attrib;
5030
5031 if (!capable(CAP_SYS_ADMIN))
5032 return -EACCES;
1107ccfb 5033 if (!argp)
1da177e4
LT
5034 return -EINVAL;
5035
1107ccfb 5036 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
1da177e4 5037 return -EFAULT;
1da177e4
LT
5038 private->attrib = attrib;
5039
fc19f381
SH
5040 dev_info(&device->cdev->dev,
5041 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5042 private->attrib.operation, private->attrib.nr_cyl);
1da177e4
LT
5043 return 0;
5044}
5045
/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 *
 * The user supplies the PSF payload and a result buffer; both lengths
 * are user controlled. The first two PSF bytes are logged for
 * diagnostics on exit.
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area */
	/* NOTE(review): lengths are user controlled and unbounded above;
	 * presumably capped by kzalloc failure - confirm against callers */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	/* remember the order bytes for the trace message below */
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			"Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI ;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			   rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
5156
1107ccfb 5157static int
8e09f215 5158dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
1107ccfb 5159{
8e09f215
SW
5160 struct dasd_device *device = block->base;
5161
1107ccfb
CH
5162 switch (cmd) {
5163 case BIODASDGATTR:
5164 return dasd_eckd_get_attrib(device, argp);
5165 case BIODASDSATTR:
5166 return dasd_eckd_set_attrib(device, argp);
5167 case BIODASDPSRD:
5168 return dasd_eckd_performance(device, argp);
5169 case BIODASDRLSE:
5170 return dasd_eckd_release(device);
5171 case BIODASDRSRV:
5172 return dasd_eckd_reserve(device);
5173 case BIODASDSLCK:
5174 return dasd_eckd_steal_lock(device);
196339f1
SW
5175 case BIODASDSNID:
5176 return dasd_eckd_snid(device, argp);
ab1d848f
NH
5177 case BIODASDSYMMIO:
5178 return dasd_symm_io(device, argp);
1107ccfb 5179 default:
6b79d14e 5180 return -ENOTTY;
1107ccfb
CH
5181 }
5182}
5183
445b5b49
HH
5184/*
5185 * Dump the range of CCWs into 'page' buffer
5186 * and return number of printed chars.
5187 */
4d284cac 5188static int
445b5b49
HH
5189dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5190{
5191 int len, count;
5192 char *datap;
5193
5194 len = 0;
5195 while (from <= to) {
773bab4a 5196 len += sprintf(page + len, PRINTK_HEADER
445b5b49
HH
5197 " CCW %p: %08X %08X DAT:",
5198 from, ((int *) from)[0], ((int *) from)[1]);
5199
5200 /* get pointer to data (consider IDALs) */
5201 if (from->flags & CCW_FLAG_IDA)
5202 datap = (char *) *((addr_t *) (addr_t) from->cda);
5203 else
5204 datap = (char *) ((addr_t) from->cda);
5205
5206 /* dump data (max 32 bytes) */
5207 for (count = 0; count < from->count && count < 32; count++) {
5208 if (count % 8 == 0) len += sprintf(page + len, " ");
5209 if (count % 4 == 0) len += sprintf(page + len, " ");
5210 len += sprintf(page + len, "%02x", datap[count]);
5211 }
5212 len += sprintf(page + len, "\n");
5213 from++;
5214 }
5215 return len;
5216}
5217
/*
 * Write a compact sense/status record to the debug feature log:
 * the subchannel status word followed by the first 32 sense bytes
 * (or a "NO VALID SENSE" marker if none are available).
 */
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			 char *reason)
{
	u64 *sense;
	u64 *stat;

	sense = (u64 *) dasd_get_sense(irb);
	stat = (u64 *) &irb->scsw;
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
			      "%016llx %016llx %016llx %016llx",
			      reason, *stat, *((u32 *) (stat + 1)),
			      sense[0], sense[1], sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
			      reason, *stat, *((u32 *) (stat + 1)),
			      "NO VALID SENSE");
	}
}
5238
1da177e4
LT
/*
 * Print sense data and related channel program.
 * Parts are printed because printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	/* concurrent sense bytes are only valid if erw.cons is set */
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		/* a CCW with neither chain flag set terminates the program */
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk(KERN_ERR "%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2; /* there is a gap - print header */
			len += sprintf(page, PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1; /* there is a gap - print header */
			len += sprintf(page + len, PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk(KERN_ERR "%s", page);
	}
	free_page((unsigned long) page);
}
5341
f3eb5384
SW
5342
/*
 * Print sense data from a tcw.
 *
 * Transport-mode variant of the sense dump: locates the transport
 * status block (TSB) for the failing TCW and prints its contents,
 * including per-format status areas and any sense bytes found there.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			      "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	tsb = NULL;
	sense = NULL;
	/* the TSB is only valid when the fcxs bit 0 indicates so */
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, PRINTK_HEADER
			       " residual %d\n", residual);

		/* low three flag bits select the status area format */
		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, PRINTK_HEADER
				      " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}
5475
/*
 * Dump sense data for a failed request, dispatching to the transport
 * mode (TCW) or command mode (CCW) variant. Suppresses the dump for
 * error conditions the issuer declared as expected via the
 * DASD_CQR_SUPPRESS_* request flags.
 */
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	u8 *sense = dasd_get_sense(irb);

	if (scsw_is_tm(&irb->scsw)) {
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and log messages shouldn't be written
		 * then. Check if the according suppress bit is set.
		 */
		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
			return;
		if (scsw_cstat(&irb->scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
			return;

		dasd_eckd_dump_sense_tcw(device, req, irb);
	} else {
		/*
		 * In some cases the 'Command Reject' or 'No Record Found'
		 * error might be expected and log messages shouldn't be
		 * written then. Check if the according suppress bit is set.
		 */
		if (sense && sense[0] & SNS0_CMD_REJECT &&
		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
			return;

		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
			return;

		dasd_eckd_dump_sense_ccw(device, req, irb);
	}
}
5512
/*
 * Power-management freeze callback: detach the device from the alias
 * (LCU) bookkeeping before suspend. dasd_eckd_restore_device() will
 * reconnect it on resume.
 */
static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
	/*
	 * the device should be disconnected from our LCU structure
	 * on restore we will reconnect it and reread LCU specific
	 * information like PAV support that might have changed
	 */
	dasd_alias_remove_device(device);
	dasd_alias_disconnect_device_from_lcu(device);

	return 0;
}
5525
/*
 * Power-management restore callback: re-initialize the device after
 * resume by rereading configuration data, regenerating the UID,
 * re-registering with the LCU/alias handling and rereading features,
 * volume/extent-pool information and device characteristics.
 * Returns 0 on success, -1 on any failure.
 */
static int dasd_eckd_restore_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_eckd_characteristics temp_rdc_data;
	int rc;
	struct dasd_uid temp_uid;
	unsigned long flags;
	unsigned long cqr_flags = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read configuration data failed, rc=%d", rc);
		goto out_err;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* warn if the device behind this cdev changed across suspend */
	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
		dev_err(&device->cdev->dev, "The UID of the DASD has "
			"changed\n");
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (rc)
		goto out_err;

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
	dasd_eckd_validate_server(device, cqr_flags);

	/* RE-Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read configuration data failed, rc=%d", rc);
		goto out_err2;
	}

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Volume Information */
	rc = dasd_eckd_read_vol_info(device);
	if (rc)
		goto out_err2;

	/* Read Extent Pool Information */
	rc = dasd_eckd_read_ext_pool_info(device);
	if (rc)
		goto out_err2;

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &temp_rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err2;
	}
	/* publish the new characteristics under the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* add device to alias management */
	dasd_alias_add_device(device);

	return 0;

out_err2:
	dasd_alias_disconnect_device_from_lcu(device);
out_err:
	return -1;
}
5605
501183f2
SH
/*
 * Reload a device after its configuration may have changed: reread
 * configuration data, regenerate the UID and re-add the device to
 * alias management. Logs a message if an alias device was reassigned
 * to a different base device. Returns 0 on success, -1 on failure.
 */
static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc, old_base;
	char print_uid[60];
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	/* snapshot the old base unit address under the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		/* vduit is only present for virtual (z/VM) devices */
		if (strlen(uid.vduit) > 0)
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr, uid.vduit);
		else
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr);

		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}
5659
5db8440c
SH
/*
 * Read the subsystem message buffer via a PSF/RSSD channel program and
 * copy the result into @messages. The request is first attempted on the
 * path given by @lpum; if that fails (e.g. under z/VM) it is retried
 * once with an open path mask.
 */
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5741
5a3b7b11
SH
/*
 * Query Host Access (QHA): ask the storage server which hosts have
 * access to this volume and copy the answer into @data.
 * Returns -EOPNOTSUPP for HYPER PAV alias devices, for servers that
 * do not advertise the feature, or when the command fails.
 */
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	/* result buffer is allocated separately as it must be DMA capable */
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
/*
 * return number of grouped devices
 *
 * Issues a "query host access" (PSF/RSSD) to the storage server via
 * dasd_eckd_query_host_access() and counts the path group entries that
 * carry the DASD_ECKD_PG_GROUPED status flag.
 *
 * Returns the count (>= 0), -ENOMEM if the query buffer cannot be
 * allocated, or the (negative) error from the query itself.
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	/* GFP_NOIO: presumably reachable from the I/O path — avoids
	 * recursing into the block layer during allocation */
	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	/* entries are variable sized; step by the entry_size reported by
	 * the storage server rather than by a compile-time struct size */
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}
5858
/*
 * write host access information to a sequential file
 *
 * Queries the storage server for host access data and emits one
 * "key value" line per path group entry into the seq_file @m
 * (consumed by the corresponding debugfs/procfs show routine).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from dasd_eckd_query_host_access().
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	/* 8 EBCDIC characters + NUL terminator */
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	/* entries are variable sized; advance by the reported entry_size */
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID, printed as 11 hex bytes */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME: copy 8 bytes, convert EBCDIC -> ASCII */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}
5905
5db8440c
SH
/*
 * Perform Subsystem Function - CUIR response
 *
 * Builds and synchronously executes a single-CCW PSF channel program
 * that sends the CUIR response code @response for @message_id back to
 * the storage server.  @lpum selects the path whose chpid/cssid/ssid
 * identify the responding path in the payload.
 *
 * Returns 0 on success, PTR_ERR() of the failed request allocation, or
 * the error from dasd_sleep_on().
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	/* translate the single-bit path mask into an array index */
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	/* fill in the CUIR response payload */
	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	/* single PSF CCW pointing at the payload above */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* response may race with path changes; let the request verify paths */
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5956
b179b037
SH
/*
 * return configuration data that is referenced by record selector
 * if a record selector is specified or per default return the
 * conf_data pointer for the path specified by lpum
 *
 * NOTE(review): the scan assumes device->path[pos].conf_data is valid
 * for all eight positions — confirm that unused positions cannot hold
 * a NULL pointer here.
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	/* scan all eight path positions (mask bit 0x80 == position 0) */
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	/* fall back to the path the CUIR message arrived on */
	return device->path[pathmask_to_pos(lpum)].conf_data;
}
5980
/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing CUIR affected paths for the give device.
 *
 * If the CUIR request does not contain the required information return the
 * path mask of the path the attention message for the CUIR request was received
 * on.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if CUIR request does not specify the scope use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		/* NOTE(review): ned is char *, so sizeof(*ned) == 1 and only
		 * the first byte of the NED is compared — looks like the whole
		 * NED structure was intended; confirm against the CUIR spec */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per_path gneq under
		   24 bit mask where mask bit 0 equals byte 7 of
		   the gneq and mask bit 24 equals byte 31 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}
6046
6047static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
a521b048 6048 unsigned long paths, int action)
b179b037 6049{
b179b037
SH
6050 int pos;
6051
6052 while (paths) {
6053 /* get position of bit in mask */
a521b048 6054 pos = 8 - ffs(paths);
b179b037 6055 /* get channel path descriptor from this position */
b179b037 6056 if (action == CUIR_QUIESCE)
a521b048
SH
6057 pr_warn("Service on the storage server caused path %x.%02x to go offline",
6058 device->path[pos].cssid,
6059 device->path[pos].chpid);
b179b037 6060 else if (action == CUIR_RESUME)
a521b048
SH
6061 pr_info("Path %x.%02x is back online after service on the storage server",
6062 device->path[pos].cssid,
6063 device->path[pos].chpid);
6064 clear_bit(7 - pos, &paths);
b179b037
SH
6065 }
6066}
6067
/*
 * Remove the CUIR-affected paths of @device from its operational path
 * mask and remember them in the CUIR path mask.
 *
 * Returns the removed path mask (tbcpm), 0 if none of the affected
 * paths is currently in use, or -EINVAL if the action would remove the
 * last operational path.
 */
static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm;

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken
		   return error */
		return -EINVAL;
	}
	/* remove device from operational path mask */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}
6087
/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error
 * occurs no onlining necessary, the storage server will
 * notify the already set offline devices again
 *
 * Iterates the active and inactive device lists of the LCU as well as
 * all PAV group base and alias devices; for each device the CUIR scope
 * is removed from the operational path mask under the ccw device lock.
 * On success the user is notified about all quiesced paths and 0 is
 * returned; otherwise the negative error from the failing removal.
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		/* path masks are modified under the ccw device lock */
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}
6156
/*
 * Resume all paths affected by a CUIR action for every device of the
 * LCU (active, inactive, and all PAV base/alias devices).
 *
 * For each device the CUIR scope is computed; paths not currently in
 * the operational path mask are marked "to be verified" and device
 * path verification is scheduled.  Finally the user is notified about
 * all resumed paths.  Always returns 0.
 */
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}
6219
/*
 * Handle a CUIR (Control Unit Initiated Reconfiguration) message that
 * arrived on path @lpum: quiesce or resume the affected paths, send the
 * resulting response code back to the storage server, and re-check for
 * further pending attention messages on that path.
 */
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce; report LAST_PATH if a device would lose its
		 * last operational path */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}
6252
/*
 * Workqueue handler for attention interrupts: read the message buffer
 * for the path recorded in the work data and dispatch CUIR messages.
 *
 * Frees the work data and drops the device reference taken by
 * dasd_eckd_check_attention() on every exit path.
 */
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;
	/* only CUIR messages are handled here */
	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}
6279
6280static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6281{
6282 struct check_attention_work_data *data;
6283
6284 data = kzalloc(sizeof(*data), GFP_ATOMIC);
6285 if (!data)
6286 return -ENOMEM;
6287 INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6288 dasd_get_device(device);
6289 data->device = device;
6290 data->lpum = lpum;
6291 schedule_work(&data->worker);
6292 return 0;
6293}
6294
a521b048
SH
6295static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6296{
6297 if (~lpum & dasd_path_get_opm(device)) {
6298 dasd_path_add_nohpfpm(device, lpum);
6299 dasd_path_remove_opm(device, lpum);
6300 dev_err(&device->cdev->dev,
6301 "Channel path %02X lost HPF functionality and is disabled\n",
6302 lpum);
6303 return 1;
6304 }
6305 return 0;
6306}
6307
6308static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6309{
6310 struct dasd_eckd_private *private = device->private;
6311
6312 dev_err(&device->cdev->dev,
6313 "High Performance FICON disabled\n");
6314 private->fcx_max_data = 0;
6315}
6316
6317static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6318{
6319 struct dasd_eckd_private *private = device->private;
6320
6321 return private->fcx_max_data ? 1 : 0;
6322}
6323
/*
 * React to an HPF (transport mode) error reported in @irb: disable HPF
 * for the whole device or just the failing path depending on the
 * sense qualifier, then stop new I/O and requeue outstanding requests
 * so they are rebuilt without HPF.
 */
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		/* the device as a whole cannot do HPF */
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		/* only the failing path lost HPF; if another path remains,
		 * just disable that path and keep HPF on the device */
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent that any new I/O is started on the device and schedule a
	 * requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
6351
d41dd122 6352static struct ccw_driver dasd_eckd_driver = {
3bda058b
SO
6353 .driver = {
6354 .name = "dasd-eckd",
6355 .owner = THIS_MODULE,
6356 },
d41dd122
SH
6357 .ids = dasd_eckd_ids,
6358 .probe = dasd_eckd_probe,
6359 .remove = dasd_generic_remove,
6360 .set_offline = dasd_generic_set_offline,
6361 .set_online = dasd_eckd_set_online,
6362 .notify = dasd_generic_notify,
a4d26c6a 6363 .path_event = dasd_generic_path_event,
4679e893 6364 .shutdown = dasd_generic_shutdown,
d41dd122
SH
6365 .freeze = dasd_generic_pm_freeze,
6366 .thaw = dasd_generic_restore_device,
6367 .restore = dasd_generic_restore_device,
a23ed009 6368 .uc_handler = dasd_generic_uc_handler,
420f42ec 6369 .int_class = IRQIO_DAS,
d41dd122 6370};
f3eb5384 6371
1da177e4
LT
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Give a little safety and the result is 240.
 *
 * NOTE(review): the calculation above yields 240 but .max_blocks is set
 * to 190 — presumably to leave headroom for additional CCWs/TCWs; the
 * comment looks stale, confirm and update the derivation.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",	/* converted to EBCDIC in dasd_eckd_init() */
	.max_blocks = 190,
	/* device setup and state transitions */
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.verify_path = dasd_eckd_verify_path,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.fill_geometry = dasd_eckd_fill_geometry,
	/* I/O processing */
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	/* error recovery */
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	/* power management */
	.freeze = dasd_eckd_pm_freeze,
	.restore = dasd_eckd_restore_device,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	/* attention/CUIR and host access */
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	/* High Performance FICON */
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	/* thin provisioning / extent space efficient (ESE) volumes */
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
};
6437
6438static int __init
6439dasd_eckd_init(void)
6440{
736e6ea0
SO
6441 int ret;
6442
1da177e4 6443 ASCEBC(dasd_eckd_discipline.ebcname, 4);
f932bcea
SW
6444 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6445 GFP_KERNEL | GFP_DMA);
6446 if (!dasd_reserve_req)
6447 return -ENOMEM;
a4d26c6a
SW
6448 path_verification_worker = kmalloc(sizeof(*path_verification_worker),
6449 GFP_KERNEL | GFP_DMA);
6450 if (!path_verification_worker) {
6451 kfree(dasd_reserve_req);
6452 return -ENOMEM;
6453 }
558b9ef0
SW
6454 rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6455 if (!rawpadpage) {
6456 kfree(path_verification_worker);
6457 kfree(dasd_reserve_req);
6458 return -ENOMEM;
6459 }
736e6ea0
SO
6460 ret = ccw_driver_register(&dasd_eckd_driver);
6461 if (!ret)
6462 wait_for_device_probe();
a4d26c6a
SW
6463 else {
6464 kfree(path_verification_worker);
f932bcea 6465 kfree(dasd_reserve_req);
558b9ef0 6466 free_page((unsigned long)rawpadpage);
a4d26c6a 6467 }
736e6ea0 6468 return ret;
1da177e4
LT
6469}
6470
/*
 * Module exit: unregister the CCW driver and release the resources
 * allocated in dasd_eckd_init() (static request buffers and the
 * raw-track padding page).
 */
static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(path_verification_worker);
	kfree(dasd_reserve_req);
	free_page((unsigned long)rawpadpage);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);