Merge branch 'mkp-fixes' into fixes
[linux-2.6-block.git] drivers/scsi/sd.c
index 54519804c46a57b99ce680ee665b4fe28dd91148..3d22fc3e3c1a7ba6df90665f59cf10e8d38ef715 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
        unsigned int max_blocks = 0;
 
        q->limits.discard_zeroes_data = 0;
-       q->limits.discard_alignment = sdkp->unmap_alignment *
-               logical_block_size;
-       q->limits.discard_granularity =
-               max(sdkp->physical_block_size,
-                   sdkp->unmap_granularity * logical_block_size);
+
+       /*
+        * When LBPRZ is reported, discard alignment and granularity
+        * must be fixed to the logical block size. Otherwise the block
+        * layer will drop misaligned portions of the request which can
+        * lead to data corruption. If LBPRZ is not set, we honor the
+        * device preference.
+        */
+       if (sdkp->lbprz) {
+               q->limits.discard_alignment = 0;
+               q->limits.discard_granularity = logical_block_size;
+       } else {
+               q->limits.discard_alignment = sdkp->unmap_alignment *
+                       logical_block_size;
+               q->limits.discard_granularity =
+                       max(sdkp->physical_block_size,
+                           sdkp->unmap_granularity * logical_block_size);
+       }
 
        sdkp->provisioning_mode = mode;
 
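For context on the comment above: before issuing a discard, the block layer rounds the range inward to the advertised granularity (relative to the alignment), so any head or tail that is not granularity-aligned is silently skipped. With LBPRZ=1 callers may rely on discarded blocks reading back as zeroes, which is why those skipped fragments amount to the corruption the comment warns about; fixing the granularity to the logical block size makes the trimming a no-op. A rough standalone sketch of the trimming with made-up request values (trim_to_granularity() is hypothetical and works in logical blocks rather than the byte-based queue limits):

        #include <stdio.h>

        /* Hypothetical illustration, not kernel code: round a discard request
         * inward to the advertised granularity, the way the block layer does
         * before issuing it.  Units are logical blocks for simplicity. */
        void trim_to_granularity(unsigned long long start,
                                 unsigned long long nr_blocks,
                                 unsigned long long granularity)
        {
                unsigned long long first = (start + granularity - 1) / granularity * granularity;
                unsigned long long last = (start + nr_blocks) / granularity * granularity;

                if (last > first)
                        printf("granularity %llu: discards [%llu, %llu), drops %llu of %llu blocks\n",
                               granularity, first, last, nr_blocks - (last - first), nr_blocks);
                else
                        printf("granularity %llu: whole request dropped\n", granularity);
        }

        int main(void)
        {
                /* A 1 MiB discard starting at LBA 3 on a 512-byte-block disk. */
                trim_to_granularity(3, 2048, 1);   /* granularity = logical block: nothing lost    */
                trim_to_granularity(3, 2048, 8);   /* granularity = 4 KiB physical: head/tail lost */
                return 0;
        }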
@@ -2321,11 +2334,8 @@ got_data:
                }
        }
 
-       if (sdkp->capacity > 0xffffffff) {
+       if (sdkp->capacity > 0xffffffff)
                sdp->use_16_for_rw = 1;
-               sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
-       } else
-               sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
 
        /* Rescale capacity to 512-byte units */
        if (sector_size == 4096)
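The use_16_for_rw change above feeds the dev_max logic later in this patch: READ(10)/WRITE(10) CDBs carry a 32-bit LBA and a 16-bit TRANSFER LENGTH, whereas READ(16)/WRITE(16) carry 64-bit and 32-bit fields, so a capacity beyond a 32-bit block number forces the 16-byte variants, and the two transfer-length ceilings are what the SD_DEF_XFER_BLOCKS and SD_MAX_XFER_BLOCKS constants appear to encode. A minimal sketch of those field widths (the struct and helper are illustrative, not sd.c code):

        #include <stdbool.h>
        #include <stdint.h>

        /* READ/WRITE CDB field widths per SBC; illustrative only. */
        struct rw_cdb_limits {
                uint64_t max_lba;       /* highest addressable logical block      */
                uint32_t max_blocks;    /* largest single-command transfer length */
        };

        const struct rw_cdb_limits rw10 = { 0xffffffffULL, 0xffff };     /* READ/WRITE(10) */
        const struct rw_cdb_limits rw16 = { UINT64_MAX,    0xffffffff }; /* READ/WRITE(16) */

        /* Mirrors sd.c's "capacity > 0xffffffff" test above. */
        bool needs_16_byte_cdbs(uint64_t nr_blocks)
        {
                return nr_blocks > rw10.max_lba;
        }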
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
        unsigned int sector_sz = sdkp->device->sector_size;
        const int vpd_len = 64;
-       u32 max_xfer_length;
        unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
        if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
            scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
                goto out;
 
-       max_xfer_length = get_unaligned_be32(&buffer[8]);
-       if (max_xfer_length)
-               sdkp->max_xfer_blocks = max_xfer_length;
-
        blk_queue_io_min(sdkp->disk->queue,
                         get_unaligned_be16(&buffer[6]) * sector_sz);
-       blk_queue_io_opt(sdkp->disk->queue,
-                        get_unaligned_be32(&buffer[12]) * sector_sz);
+
+       sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
+       sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
 
        if (buffer[3] == 0x3c) {
                unsigned int lba_count, desc_count;
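The offsets read above map onto the Block Limits VPD page (0xB0) from SBC-3: bytes 6-7 are the OPTIMAL TRANSFER LENGTH GRANULARITY, bytes 8-11 the MAXIMUM TRANSFER LENGTH and bytes 12-15 the OPTIMAL TRANSFER LENGTH, all big-endian counts of logical blocks; the buffer[3] == 0x3c check that follows confirms the page length announces the full 64-byte page, so the provisioning fields further in are present. A small standalone parser for just the three fields this hunk touches (be16()/be32()/parse_block_limits() are hypothetical stand-ins for the kernel's get_unaligned helpers):

        #include <stdint.h>

        uint16_t be16(const unsigned char *p)
        {
                return (uint16_t)((p[0] << 8) | p[1]);
        }

        uint32_t be32(const unsigned char *p)
        {
                return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                       ((uint32_t)p[2] << 8) | p[3];
        }

        /* Offsets per the SBC-3 Block Limits VPD page layout. */
        void parse_block_limits(const unsigned char *vpd,
                                uint32_t *io_min_blocks,
                                uint32_t *max_xfer_blocks,
                                uint32_t *opt_xfer_blocks)
        {
                *io_min_blocks   = be16(&vpd[6]);   /* OPTIMAL TRANSFER LENGTH GRANULARITY */
                *max_xfer_blocks = be32(&vpd[8]);   /* MAXIMUM TRANSFER LENGTH             */
                *opt_xfer_blocks = be32(&vpd[12]);  /* OPTIMAL TRANSFER LENGTH             */
        }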
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
        return 0;
 }
 
+static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
+{
+       return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
 /**
  *     sd_revalidate_disk - called the first time a new disk is seen,
  *     performs disk spin up, read_capacity, etc.
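The new logical_to_sectors() helper converts a count of logical blocks into the 512-byte sectors that queue limits are expressed in: ilog2(sector_size) - 9 is 0 for 512-byte blocks and 3 for 4096-byte blocks. A quick standalone check of the arithmetic (logical_to_sectors_demo() is a userspace stand-in, not kernel code):

        #include <assert.h>

        unsigned int logical_to_sectors_demo(unsigned int sector_size,
                                             unsigned int blocks)
        {
                unsigned int shift = 0;

                while ((1u << (shift + 1)) <= sector_size)  /* poor man's ilog2() */
                        shift++;

                return blocks << (shift - 9);
        }

        int main(void)
        {
                assert(logical_to_sectors_demo(512, 128) == 128);    /* 512 B blocks: 1:1            */
                assert(logical_to_sectors_demo(4096, 512) == 4096);  /* 4 KiB blocks: x8, i.e. 2 MiB */
                return 0;
        }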
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
 {
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdp = sdkp->device;
+       struct request_queue *q = sdkp->disk->queue;
        unsigned char *buffer;
-       unsigned int max_xfer;
+       unsigned int dev_max, rw_max;
 
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
                                      "sd_revalidate_disk\n"));
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
         */
        sd_set_flush_flag(sdkp);
 
-       max_xfer = sdkp->max_xfer_blocks;
-       max_xfer <<= ilog2(sdp->sector_size) - 9;
+       /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
+       dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
+
+       /* Some devices report a maximum block count for READ/WRITE requests. */
+       dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
+       q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+
+       /*
+        * Use the device's preferred I/O size for reads and writes
+        * unless the reported value is unreasonably large (or garbage).
+        */
+       if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
+           sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
+               rw_max = q->limits.io_opt =
+                       logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+       else
+               rw_max = BLK_DEF_MAX_SECTORS;
 
-       sdkp->disk->queue->limits.max_sectors =
-               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+       /* Combine with controller limits */
+       q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
 
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
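Taken together, the revalidate path now stacks three limits: max_dev_sectors caps what the device can ever accept (the CDB ceiling further reduced by the VPD MAXIMUM TRANSFER LENGTH), rw_max/io_opt picks the default request size (the OPTIMAL TRANSFER LENGTH when it looks sane, otherwise BLK_DEF_MAX_SECTORS), and max_sectors is that value clamped by the controller's max_hw_sectors. A worked example with made-up device numbers; the constants mirror kernel values of this era but are redefined locally:

        #include <stdint.h>
        #include <stdio.h>

        #define SD_MAX_XFER_BLOCKS      0xffffffffu     /* 32-bit TRANSFER LENGTH: READ/WRITE(16) */
        #define SD_DEF_XFER_BLOCKS      0xffffu         /* 16-bit TRANSFER LENGTH: READ/WRITE(10) */
        #define BLK_DEF_MAX_SECTORS     2560u           /* block layer default of this era        */

        uint32_t min_not_zero(uint32_t a, uint32_t b)
        {
                if (!a)
                        return b;
                if (!b)
                        return a;
                return a < b ? a : b;
        }

        int main(void)
        {
                uint32_t shift = 3;                     /* ilog2(4096) - 9: 4 KiB logical blocks */
                uint32_t use_16_for_rw = 1;
                uint32_t max_xfer_blocks = 0x10000;     /* VPD B0 MAXIMUM TRANSFER LENGTH */
                uint32_t opt_xfer_blocks = 0x100;       /* VPD B0 OPTIMAL TRANSFER LENGTH */
                uint32_t max_hw_sectors = 2048;         /* controller limit: 1 MiB        */
                uint32_t dev_max, max_dev_sectors, rw_max, max_sectors;

                dev_max = use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
                dev_max = min_not_zero(dev_max, max_xfer_blocks);
                max_dev_sectors = dev_max << shift;     /* 0x10000 << 3 = 524288 sectors (256 MiB) */

                if (opt_xfer_blocks && opt_xfer_blocks <= dev_max &&
                    opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
                        rw_max = opt_xfer_blocks << shift;  /* 0x100 << 3 = 2048 sectors (1 MiB) */
                else
                        rw_max = BLK_DEF_MAX_SECTORS;

                max_sectors = rw_max < max_hw_sectors ? rw_max : max_hw_sectors;

                printf("max_dev_sectors=%u rw_max=%u max_sectors=%u\n",
                       max_dev_sectors, rw_max, max_sectors);
                return 0;
        }

The net effect is that a device advertising a sensible OPTIMAL TRANSFER LENGTH gets requests of that size by default, while zero, oversized or garbage values fall back to the block layer default instead of the removed code's practice of deriving max_sectors directly from max_xfer_blocks.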