Merge branch 'for-2.6.31' of git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 12 Jun 2009 16:29:42 +0000 (09:29 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 12 Jun 2009 16:29:42 +0000 (09:29 -0700)
* 'for-2.6.31' of git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6: (29 commits)
  ide: re-implement ide_pci_init_one() on top of ide_pci_init_two()
  ide: unexport ide_find_dma_mode()
  ide: fix PowerMac bootup oops
  ide: skip probe if there are no devices on the port (v2)
  sl82c105: add printk() logging facility
  ide-tape: fix proc warning
  ide: add IDE_DFLAG_NIEN_QUIRK device flag
  ide: respect quirk_drives[] list on all controllers
  hpt366: enable all quirks for devices on quirk_drives[] list
  hpt366: sync quirk_drives[] list with pdc202xx_{new,old}.c
  ide: remove superfluous SELECT_MASK() call from do_rw_taskfile()
  ide: remove superfluous SELECT_MASK() call from ide_driveid_update()
  icside: remove superfluous ->maskproc method
  ide-tape: fix IDE_AFLAG_* atomic accesses
  ide-tape: change IDE_AFLAG_IGNORE_DSC non-atomically
  pdc202xx_old: kill resetproc() method
  pdc202xx_old: don't call pdc202xx_reset() on IRQ timeout
  pdc202xx_old: use ide_dma_test_irq()
  ide: preserve Host Protected Area by default (v2)
  ide-gd: implement block device ->set_capacity method (v2)
  ...

12 files changed:
Documentation/kernel-parameters.txt
drivers/ide/ide-atapi.c
drivers/ide/ide-disk.c
drivers/ide/ide-dma.c
drivers/ide/ide-io.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/ide/pdc202xx_old.c
drivers/ide/tx4939ide.c
fs/partitions/check.c
include/linux/blkdev.h
include/linux/genhd.h

diff --combined Documentation/kernel-parameters.txt
index 7bcdebffdab3850d9d3471a2f3ca5ea3b1f0b32a,e58c91ca802c33bb3942ef6725877489789c9029..0bf8a882ee9e18eb1bfba3ca05de68137211d1d8
@@@ -17,12 -17,6 +17,12 @@@ are specified on the kernel command lin
  
        usbcore.blinkenlights=1
  
 +Hyphens (dashes) and underscores are equivalent in parameter names, so
 +      log_buf_len=1M print-fatal-signals=1
 +can also be entered as
 +      log-buf-len=1M print_fatal_signals=1
 +
 +
  This document may not be entirely up to date and comprehensive. The command
  "modinfo -p ${modulename}" shows a current list of all parameters of a loadable
  module. Loadable modules, after being loaded into the running kernel, also
@@@ -56,6 -50,7 +56,6 @@@ parameter is applicable
        ISAPNP  ISA PnP code is enabled.
        ISDN    Appropriate ISDN support is enabled.
        JOY     Appropriate joystick support is enabled.
 -      KMEMTRACE kmemtrace is enabled.
        LIBATA  Libata driver is enabled
        LP      Printer support is enabled.
        LOOP    Loopback device support is enabled.
@@@ -236,35 -231,6 +236,35 @@@ and is between 256 and 4096 characters
                        power state again in power transition.
                        1 : disable the power state check
  
 +      acpi_sci=       [HW,ACPI] ACPI System Control Interrupt trigger mode
 +                      Format: { level | edge | high | low }
 +
 +      acpi_serialize  [HW,ACPI] force serialization of AML methods
 +
 +      acpi_skip_timer_override [HW,ACPI]
 +                      Recognize and ignore IRQ0/pin2 Interrupt Override.
 +                      For broken nForce2 BIOS resulting in XT-PIC timer.
 +
 +      acpi_sleep=     [HW,ACPI] Sleep options
 +                      Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
 +                                old_ordering, s4_nonvs }
 +                      See Documentation/power/video.txt for information on
 +                      s3_bios and s3_mode.
 +                      s3_beep is for debugging; it makes the PC's speaker beep
 +                      as soon as the kernel's real-mode entry point is called.
 +                      s4_nohwsig prevents ACPI hardware signature from being
 +                      used during resume from hibernation.
 +                      old_ordering causes the ACPI 1.0 ordering of the _PTS
 +                      control method, with respect to putting devices into
 +                      low power states, to be enforced (the ACPI 2.0 ordering
 +                      of _PTS is used by default).
 +                      s4_nonvs prevents the kernel from saving/restoring the
 +                      ACPI NVS memory during hibernation.
 +
 +      acpi_use_timer_override [HW,ACPI]
 +                      Use timer override. For some broken Nvidia NF5 boards
 +                      that require a timer override, but don't have HPET
 +
        acpi_enforce_resources= [ACPI]
                        { strict | lax | no }
                        Check for resource conflicts between native drivers
        ad1848=         [HW,OSS]
                        Format: <io>,<irq>,<dma>,<dma2>,<type>
  
 +      add_efi_memmap  [EFI; X86] Include EFI memory map in
 +                      kernel's map of available physical RAM.
 +
        advansys=       [HW,SCSI]
                        See header of drivers/scsi/advansys.c.
  
                                    flushed before they will be reused, which
                                    is a lot faster
  
 -      amd_iommu_size= [HW,X86-64]
 -                      Define the size of the aperture for the AMD IOMMU
 -                      driver. Possible values are:
 -                      '32M', '64M' (default), '128M', '256M', '512M', '1G'
 -
        amijoy.map=     [HW,JOY] Amiga joystick support
                        Map of devices attached to JOY0DAT and JOY1DAT
                        Format: <a>,<b>
                        not play well with APC CPU idle - disable it if you have
                        APC and your system crashes randomly.
  
 -      apic=           [APIC,i386] Advanced Programmable Interrupt Controller
 +      apic=           [APIC,X86-32] Advanced Programmable Interrupt Controller
                        Change the output verbosity whilst booting
                        Format: { quiet (default) | verbose | debug }
                        Change the amount of debugging information output
                        DMA-API debugging code disables itself because the
                        architectural default is too low.
  
 +      dma_debug_driver=<driver_name>
 +                      With this option the DMA-API debugging driver
 +                      filter feature can be enabled at boot time. Just
 +                      pass the driver to filter for as the parameter.
 +                      The filter can be disabled or changed to another
 +                      driver later using sysfs.
 +
        dscc4.setup=    [NET]
  
        dtc3181e=       [HW,SCSI]
                        to discrete, to make X server driver able to add WB
                        entry later. This parameter enables that.
  
 -      enable_timer_pin_1 [i386,x86-64]
 +      enable_timer_pin_1 [X86]
                        Enable PIN 1 of APIC timer
                        Can be useful to work around chipset bugs
                        (in particular on some ATI chipsets).
                        ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
  
        ftrace=[tracer]
 -                      [ftrace] will set and start the specified tracer
 +                      [FTRACE] will set and start the specified tracer
                        as early as possible in order to facilitate early
                        boot debugging.
  
        ftrace_dump_on_oops
 -                      [ftrace] will dump the trace buffers on oops.
 +                      [FTRACE] will dump the trace buffers on oops.
 +
 +      ftrace_filter=[function-list]
 +                      [FTRACE] Limit the functions traced by the function
 +                      tracer at boot up. function-list is a comma separated
 +                      list of functions. This list can be changed at run
 +                      time by the set_ftrace_filter file in the debugfs
 +                      tracing directory. 
 +
 +      ftrace_notrace=[function-list]
 +                      [FTRACE] Do not trace the functions specified in
 +                      function-list. This list can be changed at run time
 +                      by the set_ftrace_notrace file in the debugfs
 +                      tracing directory.
  
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
  
        hashdist=       [KNL,NUMA] Large hashes allocated during boot
                        are distributed across NUMA nodes.  Defaults on
 -                      for IA-64, off otherwise.
 +                      for 64bit NUMA, off otherwise.
                        Format: 0 | 1 (for off | on)
  
        hcl=            [IA-64] SGI's Hardware Graph compatibility layer
  
        ide-core.nodma= [HW] (E)IDE subsystem
                        Format: =0.0 to prevent dma on hda, =0.1 hdb =1.0 hdc
-                       .vlb_clock .pci_clock .noflush .noprobe .nowerr .cdrom
-                       .chs .ignore_cable are additional options
-                       See Documentation/ide/ide.txt.
-       idebus=         [HW] (E)IDE subsystem - VLB/PCI bus speed
+                       .vlb_clock .pci_clock .noflush .nohpa .noprobe .nowerr
+                       .cdrom .chs .ignore_cable are additional options
                        See Documentation/ide/ide.txt.
  
        ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
                        Format: { "sha1" | "md5" }
                        default: "sha1"
  
 +      ima_tcb         [IMA]
 +                      Load a policy which meets the needs of the Trusted
 +                      Computing Base.  This means IMA will measure all
 +                      programs exec'd, files mmap'd for exec, and all files
 +                      opened for read by uid=0.
 +
        in2000=         [HW,SCSI]
                        See header of drivers/scsi/in2000.c.
  
                        use the HighMem zone if it exists, and the Normal
                        zone if it does not.
  
 -      kmemtrace.enable=       [KNL,KMEMTRACE] Format: { yes | no }
 -                              Controls whether kmemtrace is enabled
 -                              at boot-time.
 -
 -      kmemtrace.subbufs=n     [KNL,KMEMTRACE] Overrides the number of
 -                      subbufs kmemtrace's relay channel has. Set this
 -                      higher than default (KMEMTRACE_N_SUBBUFS in code) if
 -                      you experience buffer overruns.
 -
        kgdboc=         [HW] kgdb over consoles.
                        Requires a tty driver that supports console polling.
                        (only serial supported for now)
                        Configure the RouterBoard 532 series on-chip
                        Ethernet adapter MAC address.
  
 +      kmemleak=       [KNL] Boot-time kmemleak enable/disable
 +                      Valid arguments: on, off
 +                      Default: on
 +
        kstack=N        [X86] Print N words from the kernel stack
                        in oops dumps.
  
                        register save and restore. The kernel will only save
                        legacy floating-point registers on task switch.
  
 +      noxsave         [BUGS=X86] Disables x86 extended register state save
 +                      and restore using xsave. The kernel will fallback to
 +                      enabling legacy floating-point and sse state.
 +
        nohlt           [BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
                        wfi(ARM) instruction doesn't work correctly and not to
                        use it. This is also useful when using JTAG debugger.
        noinitrd        [RAM] Tells the kernel not to load any configured
                        initial RAM disk.
  
 +      nointremap      [X86-64, Intel-IOMMU] Do not enable interrupt
 +                      remapping.
 +
        nointroute      [IA-64]
  
        nojitter        [IA64] Disables jitter checking for ITC timers.
  
        nowb            [ARM]
  
 +      nox2apic        [X86-64,APIC] Do not enable x2APIC mode.
 +
        nptcg=          [IA64] Override max number of concurrent global TLB
                        purges which is reported from either PAL_VM_SUMMARY or
                        SAL PALO.
        oprofile.timer= [HW]
                        Use timer interrupt instead of performance counters
  
 +      oprofile.cpu_type=      Force an oprofile cpu type
 +                      This might be useful if you have an older oprofile
 +                      userland or if you want common events.
 +                      Format: { archperfmon }
 +                      archperfmon: [X86] Force use of architectural
 +                              perfmon on Intel CPUs instead of the
 +                              CPU specific event set.
 +
        osst=           [HW,SCSI] SCSI Tape Driver
                        Format: <buffer_size>,<write_threshold>
                        See also Documentation/scsi/st.txt.
                        autoconfiguration.
                        Ranges are in pairs (memory base and size).
  
 +      ports=          [IP_VS_FTP] IPVS ftp helper module
 +                      Default is 21.
 +                      Up to 8 (IP_VS_APP_MAX_PORTS) ports
 +                      may be specified.
 +                      Format: <port>,<port>....
 +
        print-fatal-signals=
                        [KNL] debug: print fatal signals
                        print-fatal-signals=1: print segfault info to
diff --combined drivers/ide/ide-atapi.c
index 757e5956b13297f38a204740486f9907c35a0ae7,fbcb8513a4c806aa75cbc3f0896ac5d0feabb572..bbdd2547f12aa98779dcb9d96d7461cfdc8dc5af
@@@ -246,7 -246,6 +246,7 @@@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq)
   */
  void ide_retry_pc(ide_drive_t *drive)
  {
 +      struct request *failed_rq = drive->hwif->rq;
        struct request *sense_rq = &drive->sense_rq;
        struct ide_atapi_pc *pc = &drive->request_sense_pc;
  
        ide_init_pc(pc);
        memcpy(pc->c, sense_rq->cmd, 12);
        pc->buf = bio_data(sense_rq->bio);      /* pointer to mapped address */
 -      pc->req_xfer = sense_rq->data_len;
 +      pc->req_xfer = blk_rq_bytes(sense_rq);
  
        if (drive->media == ide_tape)
-               set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
+               drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
  
 -      if (ide_queue_sense_rq(drive, pc))
 -              ide_complete_rq(drive, -EIO, blk_rq_bytes(drive->hwif->rq));
 +      /*
 +       * Push back the failed request and put request sense on top
 +       * of it.  The failed command will be retried after sense data
 +       * is acquired.
 +       */
 +      blk_requeue_request(failed_rq->q, failed_rq);
 +      drive->hwif->rq = NULL;
 +      if (ide_queue_sense_rq(drive, pc)) {
 +              blk_start_request(failed_rq);
 +              ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
 +      }
  }
  EXPORT_SYMBOL_GPL(ide_retry_pc);
  
@@@ -313,7 -303,7 +313,7 @@@ int ide_cd_get_xferlen(struct request *
                return 32768;
        else if (blk_sense_request(rq) || blk_pc_request(rq) ||
                         rq->cmd_type == REQ_TYPE_ATA_PC)
 -              return rq->data_len;
 +              return blk_rq_bytes(rq);
        else
                return 0;
  }
@@@ -377,6 -367,7 +377,6 @@@ static ide_startstop_t ide_pc_intr(ide_
        /* No more interrupts */
        if ((stat & ATA_DRQ) == 0) {
                int uptodate, error;
 -              unsigned int done;
  
                debug_log("Packet command completed, %d bytes transferred\n",
                          pc->xferred);
                        error = uptodate ? 0 : -EIO;
                }
  
 -              ide_complete_rq(drive, error, done);
 +              ide_complete_rq(drive, error, blk_rq_bytes(rq));
                return ide_stopped;
        }
  
diff --combined drivers/ide/ide-disk.c
index c6f7fcfb9d672f47fb585fc035a1ee9f74f9642d,b5f25387cc019a02835b65c27074bd4df4d26cac..6a1de21697096525d19b850ffbfba2f99a129c88
@@@ -82,7 -82,7 +82,7 @@@ static ide_startstop_t __ide_do_rw_disk
                                        sector_t block)
  {
        ide_hwif_t *hwif        = drive->hwif;
 -      u16 nsectors            = (u16)rq->nr_sectors;
 +      u16 nsectors            = (u16)blk_rq_sectors(rq);
        u8 lba48                = !!(drive->dev_flags & IDE_DFLAG_LBA48);
        u8 dma                  = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
        struct ide_cmd          cmd;
@@@ -90,7 -90,7 +90,7 @@@
        ide_startstop_t         rc;
  
        if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
 -              if (block + rq->nr_sectors > 1ULL << 28)
 +              if (block + blk_rq_sectors(rq) > 1ULL << 28)
                        dma = 0;
                else
                        lba48 = 0;
@@@ -195,9 -195,9 +195,9 @@@ static ide_startstop_t ide_do_rw_disk(i
  
        ledtrig_ide_activity();
  
 -      pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
 +      pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
                 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
 -               (unsigned long long)block, rq->nr_sectors,
 +               (unsigned long long)block, blk_rq_sectors(rq),
                 (unsigned long)rq->buffer);
  
        if (hwif->rw_disk)
@@@ -302,14 -302,12 +302,12 @@@ static const struct drive_list_entry hp
        { NULL,         NULL }
  };
  
- static void idedisk_check_hpa(ide_drive_t *drive)
+ static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
  {
-       unsigned long long capacity, set_max;
-       int lba48 = ata_id_lba48_enabled(drive->id);
+       u64 capacity, set_max;
  
        capacity = drive->capacity64;
-       set_max = idedisk_read_native_max_address(drive, lba48);
+       set_max  = idedisk_read_native_max_address(drive, lba48);
  
        if (ide_in_drive_list(drive->id, hpa_list)) {
                /*
                        set_max--;
        }
  
+       return set_max;
+ }
+ static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
+ {
+       set_max = idedisk_set_max_address(drive, set_max, lba48);
+       if (set_max)
+               drive->capacity64 = set_max;
+       return set_max;
+ }
+ static void idedisk_check_hpa(ide_drive_t *drive)
+ {
+       u64 capacity, set_max;
+       int lba48 = ata_id_lba48_enabled(drive->id);
+       capacity = drive->capacity64;
+       set_max  = ide_disk_hpa_get_native_capacity(drive, lba48);
        if (set_max <= capacity)
                return;
  
+       drive->probed_capacity = set_max;
        printk(KERN_INFO "%s: Host Protected Area detected.\n"
                         "\tcurrent capacity is %llu sectors (%llu MB)\n"
                         "\tnative  capacity is %llu sectors (%llu MB)\n",
                         capacity, sectors_to_MB(capacity),
                         set_max, sectors_to_MB(set_max));
  
-       set_max = idedisk_set_max_address(drive, set_max, lba48);
+       if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
+               return;
  
-       if (set_max) {
-               drive->capacity64 = set_max;
+       set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
+       if (set_max)
                printk(KERN_INFO "%s: Host Protected Area disabled.\n",
                                 drive->name);
-       }
  }
  
  static int ide_disk_get_capacity(ide_drive_t *drive)
                drive->capacity64 = drive->cyl * drive->head * drive->sect;
        }
  
+       drive->probed_capacity = drive->capacity64;
        if (lba) {
                drive->dev_flags |= IDE_DFLAG_LBA;
  
                       "%llu sectors (%llu MB)\n",
                       drive->name, (unsigned long long)drive->capacity64,
                       sectors_to_MB(drive->capacity64));
-               drive->capacity64 = 1ULL << 28;
+               drive->probed_capacity = drive->capacity64 = 1ULL << 28;
        }
  
        if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
        return 0;
  }
  
+ static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
+ {
+       u64 set = min(capacity, drive->probed_capacity);
+       u16 *id = drive->id;
+       int lba48 = ata_id_lba48_enabled(id);
+       if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
+           ata_id_hpa_enabled(id) == 0)
+               goto out;
+       /*
+        * according to the spec the SET MAX ADDRESS command shall be
+        * immediately preceded by a READ NATIVE MAX ADDRESS command
+        */
+       capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
+       if (capacity == 0)
+               goto out;
+       set = ide_disk_hpa_set_capacity(drive, set, lba48);
+       if (set) {
+               /* needed for ->resume to disable HPA */
+               drive->dev_flags |= IDE_DFLAG_NOHPA;
+               return set;
+       }
+ out:
+       return drive->capacity64;
+ }
  static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
  {
        ide_drive_t *drive = q->queuedata;
@@@ -428,14 -478,14 +478,14 @@@ static int set_multcount(ide_drive_t *d
        if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
                return -EINVAL;
  
-       if (drive->special.b.set_multmode)
+       if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
                return -EBUSY;
  
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
  
        drive->mult_req = arg;
-       drive->special.b.set_multmode = 1;
+       drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
        error = blk_execute_rq(drive->queue, NULL, rq, 0);
        blk_put_request(rq);
  
@@@ -639,7 -689,7 +689,7 @@@ static void ide_disk_setup(ide_drive_t 
        }
  
        printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
 -              q->max_sectors / 2);
 +             queue_max_sectors(q) / 2);
  
        if (ata_id_is_ssd(id))
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
@@@ -740,6 -790,7 +790,7 @@@ static int ide_disk_set_doorlock(ide_dr
  
  const struct ide_disk_ops ide_ata_disk_ops = {
        .check          = ide_disk_check,
+       .set_capacity   = ide_disk_set_capacity,
        .get_capacity   = ide_disk_get_capacity,
        .setup          = ide_disk_setup,
        .flush          = ide_disk_flush,
diff --combined drivers/ide/ide-dma.c
index 001f68f0bb285c62d0b42b6dbb1508d92182923a,0bbf71f8e499209d2c71d65a2bf1d60b00130bde..219e6fb78dc6414304ff8b707c59554f9c82db75
@@@ -103,7 -103,7 +103,7 @@@ ide_startstop_t ide_dma_intr(ide_drive_
                                ide_finish_cmd(drive, cmd, stat);
                        else
                                ide_complete_rq(drive, 0,
 -                                              cmd->rq->nr_sectors << 9);
 +                                              blk_rq_sectors(cmd->rq) << 9);
                        return ide_stopped;
                }
                printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
@@@ -347,7 -347,6 +347,6 @@@ u8 ide_find_dma_mode(ide_drive_t *drive
  
        return mode;
  }
- EXPORT_SYMBOL_GPL(ide_find_dma_mode);
  
  static int ide_tune_dma(ide_drive_t *drive)
  {
diff --combined drivers/ide/ide-io.c
index bba4297f2f03fcadf80f8bb3f3a05e780e6f3551,243cf6561e7e478cf60a69c66ad1792c04f229ac..272cc38f6dbe55192283db6ea2902e3d576b6a26
@@@ -116,9 -116,9 +116,9 @@@ void ide_complete_cmd(ide_drive_t *driv
  unsigned int ide_rq_bytes(struct request *rq)
  {
        if (blk_pc_request(rq))
 -              return rq->data_len;
 +              return blk_rq_bytes(rq);
        else
 -              return rq->hard_cur_sectors << 9;
 +              return blk_rq_cur_sectors(rq) << 9;
  }
  EXPORT_SYMBOL_GPL(ide_rq_bytes);
  
@@@ -133,7 -133,7 +133,7 @@@ int ide_complete_rq(ide_drive_t *drive
         * and complete the whole request right now
         */
        if (blk_noretry_request(rq) && error <= 0)
 -              nr_bytes = rq->hard_nr_sectors << 9;
 +              nr_bytes = blk_rq_sectors(rq) << 9;
  
        rc = ide_end_rq(drive, rq, error, nr_bytes);
        if (rc == 0)
@@@ -184,29 -184,42 +184,42 @@@ static void ide_tf_set_setmult_cmd(ide_
        tf->command = ATA_CMD_SET_MULTI;
  }
  
- static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+ /**
+  *    do_special              -       issue some special commands
+  *    @drive: drive the command is for
+  *
+  *    do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
+  *    ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
+  */
+ static ide_startstop_t do_special(ide_drive_t *drive)
  {
-       special_t *s = &drive->special;
        struct ide_cmd cmd;
  
+ #ifdef DEBUG
+       printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
+               drive->special_flags);
+ #endif
+       if (drive->media != ide_disk) {
+               drive->special_flags = 0;
+               drive->mult_req = 0;
+               return ide_stopped;
+       }
        memset(&cmd, 0, sizeof(cmd));
        cmd.protocol = ATA_PROT_NODATA;
  
-       if (s->b.set_geometry) {
-               s->b.set_geometry = 0;
+       if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
+               drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
                ide_tf_set_specify_cmd(drive, &cmd.tf);
-       } else if (s->b.recalibrate) {
-               s->b.recalibrate = 0;
+       } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
+               drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
                ide_tf_set_restore_cmd(drive, &cmd.tf);
-       } else if (s->b.set_multmode) {
-               s->b.set_multmode = 0;
+       } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
+               drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
                ide_tf_set_setmult_cmd(drive, &cmd.tf);
-       } else if (s->all) {
-               int special = s->all;
-               s->all = 0;
-               printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
-               return ide_stopped;
-       }
+       } else
+               BUG();
  
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        return ide_started;
  }
  
- /**
-  *    do_special              -       issue some special commands
-  *    @drive: drive the command is for
-  *
-  *    do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
-  *    ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
-  *
-  *    It used to do much more, but has been scaled back.
-  */
- static ide_startstop_t do_special (ide_drive_t *drive)
- {
-       special_t *s = &drive->special;
- #ifdef DEBUG
-       printk("%s: do_special: 0x%02x\n", drive->name, s->all);
- #endif
-       if (drive->media == ide_disk)
-               return ide_disk_special(drive);
-       s->all = 0;
-       drive->mult_req = 0;
-       return ide_stopped;
- }
  void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
  {
        ide_hwif_t *hwif = drive->hwif;
@@@ -279,7 -267,7 +267,7 @@@ static ide_startstop_t execute_drive_cm
  
        if (cmd) {
                if (cmd->protocol == ATA_PROT_PIO) {
 -                      ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
 +                      ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
                        ide_map_sg(drive, cmd);
                }
  
@@@ -351,7 -339,8 +339,8 @@@ static ide_startstop_t start_request (i
                printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
                return startstop;
        }
-       if (!drive->special.all) {
+       if (drive->special_flags == 0) {
                struct ide_driver *drv;
  
                /*
  
                drv = *(struct ide_driver **)rq->rq_disk->private_data;
  
 -              return drv->do_request(drive, rq, rq->sector);
 +              return drv->do_request(drive, rq, blk_rq_pos(rq));
        }
        return do_special(drive);
  kill_rq:
@@@ -487,10 -476,10 +476,10 @@@ void do_ide_request(struct request_queu
  
        if (!ide_lock_port(hwif)) {
                ide_hwif_t *prev_port;
 +
 +              WARN_ON_ONCE(hwif->rq);
  repeat:
                prev_port = hwif->host->cur_port;
 -              hwif->rq = NULL;
 -
                if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
                    time_after(drive->sleep, jiffies)) {
                        ide_unlock_port(hwif);
  
                if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
                    hwif != prev_port) {
+                       ide_drive_t *cur_dev =
+                               prev_port ? prev_port->cur_dev : NULL;
                        /*
                         * set nIEN for previous port, drives in the
-                        * quirk_list may not like intr setups/cleanups
+                        * quirk list may not like intr setups/cleanups
                         */
-                       if (prev_port && prev_port->cur_dev->quirk_list == 0)
+                       if (cur_dev &&
+                           (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
                                prev_port->tp_ops->write_devctl(prev_port,
                                                                ATA_NIEN |
                                                                ATA_DEVCTL_OBS);
                 * we know that the queue isn't empty, but this can happen
                 * if the q->prep_rq_fn() decides to kill a request
                 */
 -              rq = elv_next_request(drive->queue);
 +              if (!rq)
 +                      rq = blk_fetch_request(drive->queue);
 +
                spin_unlock_irq(q->queue_lock);
                spin_lock_irq(&hwif->lock);
  
                /*
                 * Sanity: don't accept a request that isn't a PM request
                 * if we are currently power managed. This is very important as
 -               * blk_stop_queue() doesn't prevent the elv_next_request()
 +               * blk_stop_queue() doesn't prevent the blk_fetch_request()
                 * above to return us whatever is in the queue. Since we call
                 * ide_do_request() ourselves, we end up taking requests while
                 * the queue is blocked...
                startstop = start_request(drive, rq);
                spin_lock_irq(&hwif->lock);
  
 -              if (startstop == ide_stopped)
 +              if (startstop == ide_stopped) {
 +                      rq = hwif->rq;
 +                      hwif->rq = NULL;
                        goto repeat;
 +              }
        } else
                goto plug_device;
  out:
@@@ -577,24 -565,18 +570,24 @@@ plug_device
  plug_device_2:
        spin_lock_irq(q->queue_lock);
  
 +      if (rq)
 +              blk_requeue_request(q, rq);
        if (!elv_queue_empty(q))
                blk_plug_device(q);
  }
  
 -static void ide_plug_device(ide_drive_t *drive)
 +static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
  {
        struct request_queue *q = drive->queue;
        unsigned long flags;
  
        spin_lock_irqsave(q->queue_lock, flags);
 +
 +      if (rq)
 +              blk_requeue_request(q, rq);
        if (!elv_queue_empty(q))
                blk_plug_device(q);
 +
        spin_unlock_irqrestore(q->queue_lock, flags);
  }
  
@@@ -643,7 -625,6 +636,7 @@@ void ide_timer_expiry (unsigned long da
        unsigned long   flags;
        int             wait = -1;
        int             plug_device = 0;
 +      struct request  *uninitialized_var(rq_in_flight);
  
        spin_lock_irqsave(&hwif->lock, flags);
  
                spin_lock_irq(&hwif->lock);
                enable_irq(hwif->irq);
                if (startstop == ide_stopped && hwif->polling == 0) {
 +                      rq_in_flight = hwif->rq;
 +                      hwif->rq = NULL;
                        ide_unlock_port(hwif);
                        plug_device = 1;
                }
  
        if (plug_device) {
                ide_unlock_host(hwif->host);
 -              ide_plug_device(drive);
 +              ide_requeue_and_plug(drive, rq_in_flight);
        }
  }
  
@@@ -801,7 -780,6 +794,7 @@@ irqreturn_t ide_intr (int irq, void *de
        ide_startstop_t startstop;
        irqreturn_t irq_ret = IRQ_NONE;
        int plug_device = 0;
 +      struct request *uninitialized_var(rq_in_flight);
  
        if (host->host_flags & IDE_HFLAG_SERIALIZE) {
                if (hwif != host->cur_port)
         */
        if (startstop == ide_stopped && hwif->polling == 0) {
                BUG_ON(hwif->handler);
 +              rq_in_flight = hwif->rq;
 +              hwif->rq = NULL;
                ide_unlock_port(hwif);
                plug_device = 1;
        }
@@@ -892,7 -868,7 +885,7 @@@ out
  out_early:
        if (plug_device) {
                ide_unlock_host(hwif->host);
 -              ide_plug_device(drive);
 +              ide_requeue_and_plug(drive, rq_in_flight);
        }
  
        return irq_ret;
diff --combined drivers/ide/ide-tape.c
index d9764f0bc82f54e4b250c6e03754d108e2252383,51ea59e3f6ad81b22e803330f47d9c010c86bee9..4b447a8a49d4f7c44cb1c37a2390f902802dc519
@@@ -240,18 -240,27 +240,27 @@@ static struct class *idetape_sysfs_clas
  
  static void ide_tape_release(struct device *);
  
- static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
+ static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
+ static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev,
+                                        unsigned int i)
  {
        struct ide_tape_obj *tape = NULL;
  
        mutex_lock(&idetape_ref_mutex);
-       tape = ide_drv_g(disk, ide_tape_obj);
+       if (cdev)
+               tape = idetape_devs[i];
+       else
+               tape = ide_drv_g(disk, ide_tape_obj);
        if (tape) {
                if (ide_device_get(tape->drive))
                        tape = NULL;
                else
                        get_device(&tape->dev);
        }
        mutex_unlock(&idetape_ref_mutex);
        return tape;
  }
@@@ -266,24 -275,6 +275,6 @@@ static void ide_tape_put(struct ide_tap
        mutex_unlock(&idetape_ref_mutex);
  }
  
- /*
-  * The variables below are used for the character device interface. Additional
-  * state variables are defined in our ide_drive_t structure.
-  */
- static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
- static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
- {
-       struct ide_tape_obj *tape = NULL;
-       mutex_lock(&idetape_ref_mutex);
-       tape = idetape_devs[i];
-       if (tape)
-               get_device(&tape->dev);
-       mutex_unlock(&idetape_ref_mutex);
-       return tape;
- }
  /*
   * called on each failed packet command retry to analyze the request sense. We
   * currently do not utilize this information.
@@@ -380,7 -371,7 +371,7 @@@ static int ide_tape_callback(ide_drive_
                }
  
                tape->first_frame += blocks;
 -              rq->data_len -= blocks * tape->blk_size;
 +              rq->resid_len -= blocks * tape->blk_size;
  
                if (pc->error) {
                        uptodate = 0;
                if (readpos[0] & 0x4) {
                        printk(KERN_INFO "ide-tape: Block location is unknown"
                                         "to the tape\n");
-                       clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
+                       clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
+                                 &drive->atapi_flags);
                        uptodate = 0;
                        err = IDE_DRV_ERROR_GENERAL;
                } else {
  
                        tape->partition = readpos[1];
                        tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]);
-                       set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
+                       set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
+                               &drive->atapi_flags);
                }
        }
  
@@@ -586,7 -579,7 +579,7 @@@ static void ide_tape_create_rw_cmd(idet
                                   struct ide_atapi_pc *pc, struct request *rq,
                                   u8 opcode)
  {
 -      unsigned int length = rq->nr_sectors;
 +      unsigned int length = blk_rq_sectors(rq);
  
        ide_init_pc(pc);
        put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
@@@ -617,8 -610,8 +610,8 @@@ static ide_startstop_t idetape_do_reque
        struct ide_cmd cmd;
        u8 stat;
  
 -      debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu\n",
 -                (unsigned long long)rq->sector, rq->nr_sectors);
 +      debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n",
 +                (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
  
        if (!(blk_special_request(rq) || blk_sense_request(rq))) {
                /* We do not support buffer cache originated requests. */
  
        if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
            (rq->cmd[13] & REQ_IDETAPE_PC2) == 0)
-               set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
+               drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
  
        if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
-               set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
+               drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
                drive->dev_flags &= ~IDE_DFLAG_POST_RESET;
        }
  
-       if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) &&
-           (stat & ATA_DSC) == 0) {
+       if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) &&
+           !(stat & ATA_DSC)) {
                if (postponed_rq == NULL) {
                        tape->dsc_polling_start = jiffies;
                        tape->dsc_poll_freq = tape->best_dsc_rw_freq;
                        tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
                idetape_postpone_request(drive);
                return ide_stopped;
-       }
+       } else
+               drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC;
        if (rq->cmd[13] & REQ_IDETAPE_READ) {
                pc = &tape->queued_pc;
                ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
@@@ -744,7 -739,7 +739,7 @@@ static int idetape_wait_ready(ide_drive
        int load_attempted = 0;
  
        /* Wait for the tape to become ready */
-       set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
+       set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags);
        timeout += jiffies;
        while (time_before(jiffies, timeout)) {
                if (ide_do_test_unit_ready(drive, disk) == 0)
@@@ -820,7 -815,7 +815,7 @@@ static void __ide_tape_discard_merge_bu
        if (tape->chrdev_dir != IDETAPE_DIR_READ)
                return;
  
-       clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
+       clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags);
        tape->valid = 0;
        if (tape->buf != NULL) {
                kfree(tape->buf);
@@@ -892,7 -887,7 +887,7 @@@ static int idetape_queue_rw_tail(ide_dr
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd[13] = cmd;
        rq->rq_disk = tape->disk;
 -      rq->sector = tape->first_frame;
 +      rq->__sector = tape->first_frame;
  
        if (size) {
                ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
        blk_execute_rq(drive->queue, tape->disk, rq, 0);
  
        /* calculate the number of transferred bytes and update buffer state */
 -      size -= rq->data_len;
 +      size -= rq->resid_len;
        tape->cur = tape->buf;
        if (cmd == REQ_IDETAPE_READ)
                tape->valid = size;
@@@ -1113,7 -1108,8 +1108,8 @@@ static int idetape_space_over_filemarks
  
        if (tape->chrdev_dir == IDETAPE_DIR_READ) {
                tape->valid = 0;
-               if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
+               if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK),
+                                      &drive->atapi_flags))
                        ++count;
                ide_tape_discard_merge_buffer(drive, 0);
        }
@@@ -1168,7 -1164,7 +1164,7 @@@ static ssize_t idetape_chrdev_read(stru
        debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
  
        if (tape->chrdev_dir != IDETAPE_DIR_READ) {
-               if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags))
+               if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags))
                        if (count > tape->blk_size &&
                            (count % tape->blk_size) == 0)
                                tape->user_bs_factor = count / tape->blk_size;
                /* refill if staging buffer is empty */
                if (!tape->valid) {
                        /* If we are at a filemark, nothing more to read */
-                       if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
+                       if (test_bit(ilog2(IDE_AFLAG_FILEMARK),
+                                    &drive->atapi_flags))
                                break;
                        /* read */
                        if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
                done += todo;
        }
  
-       if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
+       if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) {
                debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
  
                idetape_space_over_filemarks(drive, MTFSF, 1);
@@@ -1336,7 -1333,8 +1333,8 @@@ static int idetape_mtioctop(ide_drive_
                ide_tape_discard_merge_buffer(drive, 0);
                retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK);
                if (!retval)
-                       clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
+                       clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
+                                 &drive->atapi_flags);
                return retval;
        case MTNOP:
                ide_tape_discard_merge_buffer(drive, 0);
                            mt_count % tape->blk_size)
                                return -EIO;
                        tape->user_bs_factor = mt_count / tape->blk_size;
-                       clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
+                       clear_bit(ilog2(IDE_AFLAG_DETECT_BS),
+                                 &drive->atapi_flags);
                } else
-                       set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
+                       set_bit(ilog2(IDE_AFLAG_DETECT_BS),
+                               &drive->atapi_flags);
                return 0;
        case MTSEEK:
                ide_tape_discard_merge_buffer(drive, 0);
@@@ -1486,7 -1486,7 +1486,7 @@@ static int idetape_chrdev_open(struct i
                return -ENXIO;
  
        lock_kernel();
-       tape = ide_tape_chrdev_get(i);
+       tape = ide_tape_get(NULL, true, i);
        if (!tape) {
                unlock_kernel();
                return -ENXIO;
  
        filp->private_data = tape;
  
-       if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) {
+       if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) {
                retval = -EBUSY;
                goto out_put_tape;
        }
  
        retval = idetape_wait_ready(drive, 60 * HZ);
        if (retval) {
-               clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
+               clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
                printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
                goto out_put_tape;
        }
  
        idetape_read_position(drive);
-       if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags))
+       if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags))
                (void)idetape_rewind_tape(drive);
  
        /* Read block size and write protect status from drive. */
        if (tape->write_prot) {
                if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
                    (filp->f_flags & O_ACCMODE) == O_RDWR) {
-                       clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
+                       clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
                        retval = -EROFS;
                        goto out_put_tape;
                }
@@@ -1591,15 -1591,17 +1591,17 @@@ static int idetape_chrdev_release(struc
                        ide_tape_discard_merge_buffer(drive, 1);
        }
  
-       if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags))
+       if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
+                                   &drive->atapi_flags))
                (void) idetape_rewind_tape(drive);
        if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
                if (tape->door_locked == DOOR_LOCKED) {
                        if (!ide_set_media_lock(drive, tape->disk, 0))
                                tape->door_locked = DOOR_UNLOCKED;
                }
        }
-       clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
+       clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
        ide_tape_put(tape);
        unlock_kernel();
        return 0;
@@@ -1905,7 -1907,7 +1907,7 @@@ static const struct file_operations ide
  
  static int idetape_open(struct block_device *bdev, fmode_t mode)
  {
-       struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk);
+       struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0);
  
        if (!tape)
                return -ENXIO;
diff --combined drivers/ide/ide-taskfile.c
index a0c3e1b2f73c20b6b005eb9ce2b35ea61bbb42f1,fbcb4151b0b7a98e92b09b0a79e7e78c08e82a19..75b85a8cd2d4935ba500927946fa292c890e0e8d
@@@ -98,7 -98,6 +98,6 @@@ ide_startstop_t do_rw_taskfile(ide_driv
        if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
                ide_tf_dump(drive->name, cmd);
                tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-               SELECT_MASK(drive, 0);
  
                if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
                        u8 data[2] = { cmd->tf.data, cmd->hob.data };
@@@ -166,7 -165,7 +165,7 @@@ static ide_startstop_t task_no_data_int
        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
                if (custom && tf->command == ATA_CMD_SET_MULTI) {
                        drive->mult_req = drive->mult_count = 0;
-                       drive->special.b.recalibrate = 1;
+                       drive->special_flags |= IDE_SFLAG_RECALIBRATE;
                        (void)ide_dump_status(drive, __func__, stat);
                        return ide_stopped;
                } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
@@@ -385,7 -384,7 +384,7 @@@ out_end
        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                ide_finish_cmd(drive, cmd, stat);
        else
 -              ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
 +              ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
        return ide_stopped;
  out_err:
        ide_error_cmd(drive, cmd);
diff --combined drivers/ide/pdc202xx_old.c
index e24ecc87a9b1837a60bdfce7ae59fffd1b2ad7f1,fe01db679a39c80c71601ae00d9490dd34168bcb..b6abf7e52cacb04cfa011e7a1f2adf476c28d885
  
  #define PDC202XX_DEBUG_DRIVE_INFO     0
  
- static const char *pdc_quirk_drives[] = {
-       "QUANTUM FIREBALLlct08 08",
-       "QUANTUM FIREBALLP KA6.4",
-       "QUANTUM FIREBALLP KA9.1",
-       "QUANTUM FIREBALLP LM20.4",
-       "QUANTUM FIREBALLP KX13.6",
-       "QUANTUM FIREBALLP KX20.5",
-       "QUANTUM FIREBALLP KX27.3",
-       "QUANTUM FIREBALLP LM20.5",
-       NULL
- };
  static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
  
  static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
@@@ -151,19 -139,6 +139,6 @@@ static void pdc_old_disable_66MHz_clock
        outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
  }
  
- static void pdc202xx_quirkproc(ide_drive_t *drive)
- {
-       const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
-       for (list = pdc_quirk_drives; *list != NULL; list++)
-               if (strstr(m, *list) != NULL) {
-                       drive->quirk_list = 2;
-                       return;
-               }
-       drive->quirk_list = 0;
- }
  static void pdc202xx_dma_start(ide_drive_t *drive)
  {
        if (drive->current_speed > XFER_UDMA_2)
                u8 clock = inb(high_16 + 0x11);
  
                outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
 -              word_count = (rq->nr_sectors << 8);
 +              word_count = (blk_rq_sectors(rq) << 8);
                word_count = (rq_data_dir(rq) == READ) ?
                                        word_count | 0x05000000 :
                                        word_count | 0x06000000;
@@@ -203,52 -178,6 +178,6 @@@ static int pdc202xx_dma_end(ide_drive_
        return ide_dma_end(drive);
  }
  
- static int pdc202xx_dma_test_irq(ide_drive_t *drive)
- {
-       ide_hwif_t *hwif        = drive->hwif;
-       unsigned long high_16   = hwif->extra_base - 16;
-       u8 dma_stat             = inb(hwif->dma_base + ATA_DMA_STATUS);
-       u8 sc1d                 = inb(high_16 + 0x001d);
-       if (hwif->channel) {
-               /* bit7: Error, bit6: Interrupting, bit5: FIFO Full, bit4: FIFO Empty */
-               if ((sc1d & 0x50) == 0x50)
-                       goto somebody_else;
-               else if ((sc1d & 0x40) == 0x40)
-                       return (dma_stat & 4) == 4;
-       } else {
-               /* bit3: Error, bit2: Interrupting, bit1: FIFO Full, bit0: FIFO Empty */
-               if ((sc1d & 0x05) == 0x05)
-                       goto somebody_else;
-               else if ((sc1d & 0x04) == 0x04)
-                       return (dma_stat & 4) == 4;
-       }
- somebody_else:
-       return (dma_stat & 4) == 4;     /* return 1 if INTR asserted */
- }
- static void pdc202xx_reset(ide_drive_t *drive)
- {
-       ide_hwif_t *hwif        = drive->hwif;
-       unsigned long high_16   = hwif->extra_base - 16;
-       u8 udma_speed_flag      = inb(high_16 | 0x001f);
-       printk(KERN_WARNING "PDC202xx: software reset...\n");
-       outb(udma_speed_flag | 0x10, high_16 | 0x001f);
-       mdelay(100);
-       outb(udma_speed_flag & ~0x10, high_16 | 0x001f);
-       mdelay(2000);   /* 2 seconds ?! */
-       ide_set_max_pio(drive);
- }
- static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
- {
-       pdc202xx_reset(drive);
-       ide_dma_lost_irq(drive);
- }
  static int init_chipset_pdc202xx(struct pci_dev *dev)
  {
        unsigned long dmabase = pci_resource_start(dev, 4);
@@@ -302,37 -231,22 +231,22 @@@ static void __devinit pdc202ata4_fixup_
  static const struct ide_port_ops pdc20246_port_ops = {
        .set_pio_mode           = pdc202xx_set_pio_mode,
        .set_dma_mode           = pdc202xx_set_mode,
-       .quirkproc              = pdc202xx_quirkproc,
  };
  
  static const struct ide_port_ops pdc2026x_port_ops = {
        .set_pio_mode           = pdc202xx_set_pio_mode,
        .set_dma_mode           = pdc202xx_set_mode,
-       .quirkproc              = pdc202xx_quirkproc,
-       .resetproc              = pdc202xx_reset,
        .cable_detect           = pdc2026x_cable_detect,
  };
  
- static const struct ide_dma_ops pdc20246_dma_ops = {
-       .dma_host_set           = ide_dma_host_set,
-       .dma_setup              = ide_dma_setup,
-       .dma_start              = ide_dma_start,
-       .dma_end                = ide_dma_end,
-       .dma_test_irq           = pdc202xx_dma_test_irq,
-       .dma_lost_irq           = ide_dma_lost_irq,
-       .dma_timer_expiry       = ide_dma_sff_timer_expiry,
-       .dma_sff_read_status    = ide_dma_sff_read_status,
- };
  static const struct ide_dma_ops pdc2026x_dma_ops = {
        .dma_host_set           = ide_dma_host_set,
        .dma_setup              = ide_dma_setup,
        .dma_start              = pdc202xx_dma_start,
        .dma_end                = pdc202xx_dma_end,
-       .dma_test_irq           = pdc202xx_dma_test_irq,
-       .dma_lost_irq           = pdc202xx_dma_lost_irq,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
        .dma_timer_expiry       = ide_dma_sff_timer_expiry,
-       .dma_clear              = pdc202xx_reset,
        .dma_sff_read_status    = ide_dma_sff_read_status,
  };
  
@@@ -354,7 -268,7 +268,7 @@@ static const struct ide_port_info pdc20
                .name           = DRV_NAME,
                .init_chipset   = init_chipset_pdc202xx,
                .port_ops       = &pdc20246_port_ops,
-               .dma_ops        = &pdc20246_dma_ops,
+               .dma_ops        = &sff_dma_ops,
                .host_flags     = IDE_HFLAGS_PDC202XX,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
diff --combined drivers/ide/tx4939ide.c
index 5ca76224f6d11a731b585f98a35ea8e2661c4e5e,9f73fd43d1f46f2f2afed006109092754d833c11..64b58ecc3f0ea7131d697634e4c848554f5b7e17
@@@ -307,7 -307,7 +307,7 @@@ static int tx4939ide_dma_setup(ide_driv
        tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
                         TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
  
 -      tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
 +      tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
  
        return 0;
  }
@@@ -537,8 -537,7 +537,7 @@@ static const struct ide_port_info tx493
  
  static int __init tx4939ide_probe(struct platform_device *pdev)
  {
-       hw_regs_t hw;
-       hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
+       struct ide_hw hw, *hws[] = { &hw };
        struct ide_host *host;
        struct resource *res;
        int irq, ret;
        hw.dev = &pdev->dev;
  
        pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq);
-       host = ide_host_alloc(&tx4939ide_port_info, hws);
+       host = ide_host_alloc(&tx4939ide_port_info, hws, 1);
        if (!host)
                return -ENOMEM;
        /* use extra_base for base address of the all registers */
diff --combined fs/partitions/check.c
index 0af36085eb28b21498036ce42f5fb682153b7baa,4bc2c43fa0839f6493ab6a0de7ec2638d577f8dc..1a9c7878f8649b1df14531f66aa247fd2587e2c8
@@@ -219,13 -219,6 +219,13 @@@ ssize_t part_size_show(struct device *d
        return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
  }
  
 +ssize_t part_alignment_offset_show(struct device *dev,
 +                                 struct device_attribute *attr, char *buf)
 +{
 +      struct hd_struct *p = dev_to_part(dev);
 +      return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
 +}
 +
  ssize_t part_stat_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
  {
@@@ -279,7 -272,6 +279,7 @@@ ssize_t part_fail_store(struct device *
  static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
  static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
  static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 +static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
  static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
  #ifdef CONFIG_FAIL_MAKE_REQUEST
  static struct device_attribute dev_attr_fail =
@@@ -290,7 -282,6 +290,7 @@@ static struct attribute *part_attrs[] 
        &dev_attr_partition.attr,
        &dev_attr_start.attr,
        &dev_attr_size.attr,
 +      &dev_attr_alignment_offset.attr,
        &dev_attr_stat.attr,
  #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
@@@ -392,7 -383,6 +392,7 @@@ struct hd_struct *add_partition(struct 
        pdev = part_to_dev(p);
  
        p->start_sect = start;
 +      p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
        p->nr_sects = len;
        p->partno = partno;
        p->policy = get_disk_ro(disk);
@@@ -556,27 -546,49 +556,49 @@@ int rescan_partitions(struct gendisk *d
  
        /* add partitions */
        for (p = 1; p < state->limit; p++) {
-               sector_t size = state->parts[p].size;
-               sector_t from = state->parts[p].from;
+               sector_t size, from;
+ try_scan:
+               size = state->parts[p].size;
                if (!size)
                        continue;
+               from = state->parts[p].from;
                if (from >= get_capacity(disk)) {
                        printk(KERN_WARNING
                               "%s: p%d ignored, start %llu is behind the end of the disk\n",
                               disk->disk_name, p, (unsigned long long) from);
                        continue;
                }
                if (from + size > get_capacity(disk)) {
-                       /*
-                        * we can not ignore partitions of broken tables
-                        * created by for example camera firmware, but we
-                        * limit them to the end of the disk to avoid
-                        * creating invalid block devices
-                        */
+                       struct block_device_operations *bdops = disk->fops;
+                       unsigned long long capacity;
                        printk(KERN_WARNING
-                              "%s: p%d size %llu limited to end of disk\n",
+                              "%s: p%d size %llu exceeds device capacity, ",
                               disk->disk_name, p, (unsigned long long) size);
-                       size = get_capacity(disk) - from;
+                       if (bdops->set_capacity &&
+                           (disk->flags & GENHD_FL_NATIVE_CAPACITY) == 0) {
+                               printk(KERN_CONT "enabling native capacity\n");
+                               capacity = bdops->set_capacity(disk, ~0ULL);
+                               disk->flags |= GENHD_FL_NATIVE_CAPACITY;
+                               if (capacity > get_capacity(disk)) {
+                                       set_capacity(disk, capacity);
+                                       check_disk_size_change(disk, bdev);
+                                       bdev->bd_invalidated = 0;
+                               }
+                               goto try_scan;
+                       } else {
+                               /*
+                                * we cannot ignore partitions of broken tables
+                                * created by, for example, camera firmware, but
+                                * we limit them to the end of the disk to avoid
+                                * creating invalid block devices
+                                */
+                               printk(KERN_CONT "limited to end of disk\n");
+                               size = get_capacity(disk) - from;
+                       }
                }
                part = add_partition(disk, p, from, size,
                                     state->parts[p].flags);
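
add_partition() now records where a partition starts relative to the device's
preferred I/O boundary and exposes it through the new alignment_offset sysfs
attribute; the value comes from queue_sector_alignment_offset(), defined in the
blkdev.h hunk further down.  A worked example of the arithmetic, assuming a
drive reporting io_min = 4096 and a device-level alignment_offset of 0; the
helper below is illustrative, not part of the patch:

#include <linux/types.h>

/*
 * Mirrors queue_sector_alignment_offset() from the blkdev.h hunk below.
 * io_min is assumed to be a power of two, as in the kernel helper.
 */
static unsigned int example_part_misalignment(sector_t start,
					      unsigned int io_min,
					      unsigned int dev_alignment_offset)
{
	return ((start << 9) - dev_alignment_offset) & (io_min - 1);
}

/*
 * example_part_misalignment(63, 4096, 0)   == 3584  classic DOS start sector,
 *                                                   3584 bytes past a 4 KiB boundary
 * example_part_misalignment(64, 4096, 0)   == 0     aligned
 * example_part_misalignment(2048, 4096, 0) == 0     1 MiB-aligned partition
 */

The result is what userspace sees in /sys/block/<disk>/<part>/alignment_offset,
so partitioning tools can flag misaligned partitions on 4 KiB-sector drives.
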
diff --combined include/linux/blkdev.h
index ebdfde8fe556166f19a81a59736cdb3b9aacaf6f,a2d7298be351211b5ca6fdb8f650458044a39d0f..0b1a6cae9de1ebbc5010c2464ddc43cf33ae462b
@@@ -118,7 -118,6 +118,7 @@@ enum rq_flag_bits 
        __REQ_COPY_USER,        /* contains copies of user pages */
        __REQ_INTEGRITY,        /* integrity metadata has been remapped */
        __REQ_NOIDLE,           /* Don't anticipate more IO after this one */
 +      __REQ_IO_STAT,          /* account I/O stat */
        __REQ_NR_BITS,          /* stops here */
  };
  
  #define REQ_COPY_USER (1 << __REQ_COPY_USER)
  #define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
  #define REQ_NOIDLE    (1 << __REQ_NOIDLE)
 +#define REQ_IO_STAT   (1 << __REQ_IO_STAT)
  
  #define BLK_MAX_CDB   16
  
@@@ -166,9 -164,19 +166,9 @@@ struct request 
        enum rq_cmd_type_bits cmd_type;
        unsigned long atomic_flags;
  
 -      /* Maintain bio traversal state for part by part I/O submission.
 -       * hard_* are block layer internals, no driver should touch them!
 -       */
 -
 -      sector_t sector;                /* next sector to submit */
 -      sector_t hard_sector;           /* next sector to complete */
 -      unsigned long nr_sectors;       /* no. of sectors left to submit */
 -      unsigned long hard_nr_sectors;  /* no. of sectors left to complete */
 -      /* no. of sectors left to submit in the current segment */
 -      unsigned int current_nr_sectors;
 -
 -      /* no. of sectors left to complete in the current segment */
 -      unsigned int hard_cur_sectors;
 +      /* the following two fields are internal, NEVER access directly */
 +      sector_t __sector;              /* sector cursor */
 +      unsigned int __data_len;        /* total data len */
  
        struct bio *bio;
        struct bio *biotail;
  
        unsigned short ioprio;
  
 -      void *special;
 -      char *buffer;
 +      void *special;          /* opaque pointer available for LLD use */
 +      char *buffer;           /* kaddr of the current segment if available */
  
        int tag;
        int errors;
        unsigned char __cmd[BLK_MAX_CDB];
        unsigned char *cmd;
  
 -      unsigned int data_len;
        unsigned int extra_len; /* length of alignment and padding */
        unsigned int sense_len;
 -      void *data;
 +      unsigned int resid_len; /* residual count */
        void *sense;
  
        unsigned long deadline;
@@@ -307,26 -316,6 +307,26 @@@ struct blk_cmd_filter 
        struct kobject kobj;
  };
  
 +struct queue_limits {
 +      unsigned long           bounce_pfn;
 +      unsigned long           seg_boundary_mask;
 +
 +      unsigned int            max_hw_sectors;
 +      unsigned int            max_sectors;
 +      unsigned int            max_segment_size;
 +      unsigned int            physical_block_size;
 +      unsigned int            alignment_offset;
 +      unsigned int            io_min;
 +      unsigned int            io_opt;
 +
 +      unsigned short          logical_block_size;
 +      unsigned short          max_hw_segments;
 +      unsigned short          max_phys_segments;
 +
 +      unsigned char           misaligned;
 +      unsigned char           no_cluster;
 +};
 +
  struct request_queue
  {
        /*
        /*
         * queue needs bounce pages for pages above this limit
         */
 -      unsigned long           bounce_pfn;
        gfp_t                   bounce_gfp;
  
        /*
        unsigned int            nr_congestion_off;
        unsigned int            nr_batching;
  
 -      unsigned int            max_sectors;
 -      unsigned int            max_hw_sectors;
 -      unsigned short          max_phys_segments;
 -      unsigned short          max_hw_segments;
 -      unsigned short          hardsect_size;
 -      unsigned int            max_segment_size;
 -
 -      unsigned long           seg_boundary_mask;
        void                    *dma_drain_buffer;
        unsigned int            dma_drain_size;
        unsigned int            dma_pad_mask;
        struct list_head        tag_busy_list;
  
        unsigned int            nr_sorted;
 -      unsigned int            in_flight;
 +      unsigned int            in_flight[2];
  
        unsigned int            rq_timeout;
        struct timer_list       timeout;
        struct list_head        timeout_list;
  
 +      struct queue_limits     limits;
 +
        /*
         * sg stuff
         */
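
The hunk above folds the scattered per-queue limit fields (max_sectors,
hardsect_size, seg_boundary_mask and friends) into the new struct queue_limits
embedded in the request_queue, and splits in_flight into a two-element array
read back through the queue_in_flight() helper added just below.  Callers are
expected to use the accessor helpers introduced later in this diff rather than
reaching into q->limits.  A hedged sketch of the reader-side conversion;
example_dump_limits() is illustrative only:

#include <linux/kernel.h>
#include <linux/blkdev.h>

static void example_dump_limits(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* old code read q->max_sectors, q->hardsect_size, ... directly */
	pr_info("max_sectors=%u lbs=%u pbs=%u io_min=%u io_opt=%u align=%d\n",
		queue_max_sectors(q),
		queue_logical_block_size(q),
		queue_physical_block_size(q),
		queue_io_min(q),
		queue_io_opt(q),
		queue_alignment_offset(q));
}
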
@@@ -524,11 -520,6 +524,11 @@@ static inline void queue_flag_clear_unl
        __clear_bit(flag, &q->queue_flags);
  }
  
 +static inline int queue_in_flight(struct request_queue *q)
 +{
 +      return q->in_flight[0] + q->in_flight[1];
 +}
 +
  static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
  {
        WARN_ON_ONCE(!queue_is_locked(q));
@@@ -607,7 -598,6 +607,7 @@@ enum 
                                 blk_failfast_transport(rq) ||  \
                                 blk_failfast_driver(rq))
  #define blk_rq_started(rq)    ((rq)->cmd_flags & REQ_STARTED)
 +#define blk_rq_io_stat(rq)    ((rq)->cmd_flags & REQ_IO_STAT)
  #define blk_rq_quiet(rq)      ((rq)->cmd_flags & REQ_QUIET)
  
  #define blk_account_rq(rq)    (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 
@@@ -759,17 -749,10 +759,17 @@@ extern void blk_rq_init(struct request_
  extern void blk_put_request(struct request *);
  extern void __blk_put_request(struct request_queue *, struct request *);
  extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 +extern struct request *blk_make_request(struct request_queue *, struct bio *,
 +                                      gfp_t);
  extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
  extern void blk_requeue_request(struct request_queue *, struct request *);
  extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
  extern int blk_lld_busy(struct request_queue *q);
 +extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 +                           struct bio_set *bs, gfp_t gfp_mask,
 +                           int (*bio_ctr)(struct bio *, struct bio *, void *),
 +                           void *data);
 +extern void blk_rq_unprep_clone(struct request *rq);
  extern int blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
  extern void blk_plug_device(struct request_queue *);
@@@ -781,6 -764,12 +781,6 @@@ extern int scsi_cmd_ioctl(struct reques
  extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
  
 -/*
 - * Temporary export, until SCSI gets fixed up.
 - */
 -extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 -                           struct bio *bio);
 -
  /*
   * A queue has just exited congestion.  Note this in the global counter of
   * congested queues, and wake up anyone who was waiting for requests to be
@@@ -806,6 -795,7 +806,6 @@@ extern void blk_sync_queue(struct reque
  extern void __blk_stop_queue(struct request_queue *q);
  extern void __blk_run_queue(struct request_queue *);
  extern void blk_run_queue(struct request_queue *);
 -extern void blk_start_queueing(struct request_queue *);
  extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
@@@ -838,73 -828,41 +838,73 @@@ static inline void blk_run_address_spac
                blk_run_backing_dev(mapping->backing_dev_info, NULL);
  }
  
 -extern void blkdev_dequeue_request(struct request *req);
 +/*
 + * blk_rq_pos()               : the current sector
 + * blk_rq_bytes()     : bytes left in the entire request
 + * blk_rq_cur_bytes() : bytes left in the current segment
 + * blk_rq_sectors()   : sectors left in the entire request
 + * blk_rq_cur_sectors()       : sectors left in the current segment
 + */
 +static inline sector_t blk_rq_pos(const struct request *rq)
 +{
 +      return rq->__sector;
 +}
 +
 +static inline unsigned int blk_rq_bytes(const struct request *rq)
 +{
 +      return rq->__data_len;
 +}
 +
 +static inline int blk_rq_cur_bytes(const struct request *rq)
 +{
 +      return rq->bio ? bio_cur_bytes(rq->bio) : 0;
 +}
 +
 +static inline unsigned int blk_rq_sectors(const struct request *rq)
 +{
 +      return blk_rq_bytes(rq) >> 9;
 +}
 +
 +static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 +{
 +      return blk_rq_cur_bytes(rq) >> 9;
 +}
 +
 +/*
 + * Request issue related functions.
 + */
 +extern struct request *blk_peek_request(struct request_queue *q);
 +extern void blk_start_request(struct request *rq);
 +extern struct request *blk_fetch_request(struct request_queue *q);
  
  /*
 - * blk_end_request() and friends.
 - * __blk_end_request() and end_request() must be called with
 - * the request queue spinlock acquired.
 + * Request completion related functions.
 + *
 + * blk_update_request() completes the given number of bytes and updates
 + * the request without completing it.
 + *
 + * blk_end_request() and friends.  __blk_end_request() must be called
 + * with the request queue spinlock acquired.
   *
   * Several drivers define their own end_request and call
   * blk_end_request() for parts of the original function.
   * This prevents code duplication in drivers.
   */
 -extern int blk_end_request(struct request *rq, int error,
 -                              unsigned int nr_bytes);
 -extern int __blk_end_request(struct request *rq, int error,
 -                              unsigned int nr_bytes);
 -extern int blk_end_bidi_request(struct request *rq, int error,
 -                              unsigned int nr_bytes, unsigned int bidi_bytes);
 -extern void end_request(struct request *, int);
 -extern int blk_end_request_callback(struct request *rq, int error,
 -                              unsigned int nr_bytes,
 -                              int (drv_callback)(struct request *));
 +extern bool blk_update_request(struct request *rq, int error,
 +                             unsigned int nr_bytes);
 +extern bool blk_end_request(struct request *rq, int error,
 +                          unsigned int nr_bytes);
 +extern void blk_end_request_all(struct request *rq, int error);
 +extern bool blk_end_request_cur(struct request *rq, int error);
 +extern bool __blk_end_request(struct request *rq, int error,
 +                            unsigned int nr_bytes);
 +extern void __blk_end_request_all(struct request *rq, int error);
 +extern bool __blk_end_request_cur(struct request *rq, int error);
 +
  extern void blk_complete_request(struct request *);
  extern void __blk_complete_request(struct request *);
  extern void blk_abort_request(struct request *);
  extern void blk_abort_queue(struct request_queue *);
 -extern void blk_update_request(struct request *rq, int error,
 -                             unsigned int nr_bytes);
 -
 -/*
 - * blk_end_request() takes bytes instead of sectors as a complete size.
 - * blk_rq_bytes() returns bytes left to complete in the entire request.
 - * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 - */
 -extern unsigned int blk_rq_bytes(struct request *rq);
 -extern unsigned int blk_rq_cur_bytes(struct request *rq);
  
  /*
   * Access functions for manipulating queue properties
@@@ -916,20 -874,10 +916,20 @@@ extern void blk_cleanup_queue(struct re
  extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
  extern void blk_queue_bounce_limit(struct request_queue *, u64);
  extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 +extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
  extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
  extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
  extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 -extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 +extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 +extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 +extern void blk_queue_alignment_offset(struct request_queue *q,
 +                                     unsigned int alignment);
 +extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 +extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 +extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 +                          sector_t offset);
 +extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 +                            sector_t offset);
  extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
  extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
  extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
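
blk_queue_hardsect_size() becomes blk_queue_logical_block_size(), and new
setters let a driver describe physical block size, alignment and preferred I/O
sizes, with blk_stack_limits()/disk_stack_limits() propagating them through
stacked devices.  A minimal sketch of how a low-level driver for a
4 KiB-physical-sector disk might advertise its topology at probe time; the
example_ prefix and the particular values are illustrative:

#include <linux/blkdev.h>

static void example_set_topology(struct request_queue *q)
{
	/* 512-byte logical sectors carried on 4 KiB physical sectors */
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);

	/* LBA 63, not LBA 0, falls on a physical-sector boundary,
	 * which corresponds to an alignment_offset of 3584 bytes   */
	blk_queue_alignment_offset(q, 3584);

	/* minimum and optimal I/O sizes exported to sysfs and to stacking */
	blk_queue_io_min(q, 4096);
	blk_queue_io_opt(q, 4096);
}

Stacking drivers (MD/DM style) can instead call disk_stack_limits(disk, bdev,
offset) to merge a component device's limits into their own.
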
@@@ -1016,87 -964,19 +1016,87 @@@ extern void blk_set_cmd_filter_defaults
  
  #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
  
 -static inline int queue_hardsect_size(struct request_queue *q)
 +static inline unsigned long queue_bounce_pfn(struct request_queue *q)
 +{
 +      return q->limits.bounce_pfn;
 +}
 +
 +static inline unsigned long queue_segment_boundary(struct request_queue *q)
 +{
 +      return q->limits.seg_boundary_mask;
 +}
 +
 +static inline unsigned int queue_max_sectors(struct request_queue *q)
 +{
 +      return q->limits.max_sectors;
 +}
 +
 +static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
 +{
 +      return q->limits.max_hw_sectors;
 +}
 +
 +static inline unsigned short queue_max_hw_segments(struct request_queue *q)
 +{
 +      return q->limits.max_hw_segments;
 +}
 +
 +static inline unsigned short queue_max_phys_segments(struct request_queue *q)
 +{
 +      return q->limits.max_phys_segments;
 +}
 +
 +static inline unsigned int queue_max_segment_size(struct request_queue *q)
 +{
 +      return q->limits.max_segment_size;
 +}
 +
 +static inline unsigned short queue_logical_block_size(struct request_queue *q)
  {
        int retval = 512;
  
 -      if (q && q->hardsect_size)
 -              retval = q->hardsect_size;
 +      if (q && q->limits.logical_block_size)
 +              retval = q->limits.logical_block_size;
  
        return retval;
  }
  
 -static inline int bdev_hardsect_size(struct block_device *bdev)
 +static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
 +{
 +      return queue_logical_block_size(bdev_get_queue(bdev));
 +}
 +
 +static inline unsigned int queue_physical_block_size(struct request_queue *q)
 +{
 +      return q->limits.physical_block_size;
 +}
 +
 +static inline unsigned int queue_io_min(struct request_queue *q)
 +{
 +      return q->limits.io_min;
 +}
 +
 +static inline unsigned int queue_io_opt(struct request_queue *q)
 +{
 +      return q->limits.io_opt;
 +}
 +
 +static inline int queue_alignment_offset(struct request_queue *q)
 +{
 +      if (q && q->limits.misaligned)
 +              return -1;
 +
 +      if (q && q->limits.alignment_offset)
 +              return q->limits.alignment_offset;
 +
 +      return 0;
 +}
 +
 +static inline int queue_sector_alignment_offset(struct request_queue *q,
 +                                              sector_t sector)
  {
 -      return queue_hardsect_size(bdev_get_queue(bdev));
 +      return ((sector << 9) - q->limits.alignment_offset)
 +              & (q->limits.io_min - 1);
  }
  
  static inline int queue_dma_alignment(struct request_queue *q)
@@@ -1226,6 -1106,8 +1226,8 @@@ struct block_device_operations 
        int (*direct_access) (struct block_device *, sector_t,
                                                void **, unsigned long *);
        int (*media_changed) (struct gendisk *);
+       unsigned long long (*set_capacity) (struct gendisk *,
+                                               unsigned long long);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        struct module *owner;
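
Taken together, the blkdev.h changes above replace the old
blkdev_dequeue_request()/end_request() pattern with the blk_peek_request()/
blk_start_request()/blk_fetch_request() issue helpers and the
blk_end_request()/__blk_end_request_all() completion family.  A hedged sketch
of a simple ->request_fn written against the new interface;
example_do_transfer() is a placeholder, and a real driver would complete
asynchronously and use blk_end_request() for partial completions:

#include <linux/blkdev.h>
#include <linux/errno.h>

/* hypothetical transfer routine, for illustration only */
static int example_do_transfer(struct request *rq, sector_t pos,
			       unsigned int nr_sectors);

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;
	int err;

	/*
	 * ->request_fn runs with the queue lock held, so the __-prefixed
	 * completion helpers are the right ones here.  Everything is done
	 * synchronously purely for brevity.
	 */
	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}

		/* request geometry comes from accessors, not rq fields */
		err = example_do_transfer(rq, blk_rq_pos(rq),
					  blk_rq_sectors(rq));
		__blk_end_request_all(rq, err);
	}
}
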
diff --combined include/linux/genhd.h
index 149fda264c86c9a404befd350d1a7c2006ab3213,239e24b081a9a959f2689b8bc824e44713c3ec3f..7cbd38d363a2f051af1abbd30d7bbf90c066151e
@@@ -90,7 -90,6 +90,7 @@@ struct disk_stats 
  struct hd_struct {
        sector_t start_sect;
        sector_t nr_sects;
 +      sector_t alignment_offset;
        struct device __dev;
        struct kobject *holder_dir;
        int policy, partno;
  #define GENHD_FL_UP                           16
  #define GENHD_FL_SUPPRESS_PARTITION_INFO      32
  #define GENHD_FL_EXT_DEVT                     64 /* allow extended devt */
+ #define GENHD_FL_NATIVE_CAPACITY              128
  
  #define BLK_SCSI_MAX_CMDS     (256)
  #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
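
GENHD_FL_NATIVE_CAPACITY is the flag rescan_partitions() sets (see the
fs/partitions/check.c hunk above) after asking the driver to unlock the
device's full size through the new ->set_capacity method in
block_device_operations; ide-gd supplies the real implementation mentioned in
the merge log.  A hedged sketch of the driver side for a disk whose firmware
hides sectors behind a Host Protected Area; all example_ names are
placeholders:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

/* hypothetical device type and firmware helpers, for illustration only */
struct example_dev;
unsigned long long example_read_native_max(struct example_dev *dev);
int example_disable_hpa(struct example_dev *dev, unsigned long long sectors);

/*
 * ->set_capacity(disk, request) asks the driver for up to 'request' sectors
 * and returns the capacity it could actually expose.  rescan_partitions()
 * calls it with ~0ULL when a partition runs past the current capacity and
 * GENHD_FL_NATIVE_CAPACITY is not yet set, then grows the gendisk if the
 * returned value is larger.
 */
static unsigned long long example_set_capacity(struct gendisk *disk,
					       unsigned long long request)
{
	struct example_dev *dev = disk->private_data;
	unsigned long long native = example_read_native_max(dev);

	if (request > native)
		request = native;
	if (example_disable_hpa(dev, request))
		return get_capacity(disk);	/* unchanged on failure */
	return request;
}

static struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.set_capacity	= example_set_capacity,
	/* .open, .release, .getgeo, ... */
};
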
@@@ -215,7 -215,6 +216,7 @@@ static inline void disk_put_part(struc
  #define DISK_PITER_REVERSE    (1 << 0) /* iterate in the reverse direction */
  #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
  #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
 +#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
  
  struct disk_part_iter {
        struct gendisk          *disk;