Merge branch 'for-4.6/drivers' of git://git.kernel.dk/linux-block
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 19 Mar 2016 00:13:31 +0000 (17:13 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 19 Mar 2016 00:13:31 +0000 (17:13 -0700)
Pull block driver updates from Jens Axboe:
 "This is the block driver pull request for this merge window.  It sits
  on top of for-4.6/core, that was just sent out.

  This contains:

   - A set of fixes for lightnvm.  One from Alan, fixing an overflow,
     and the rest from the usual suspects, Javier and Matias.

   - A set of fixes for nbd from Markus and Dan, and a fixup from Arnd
     for correct usage of the signed 64-bit divider.

   - A set of bug fixes for the Micron mtip32xx, from Asai.

   - A fix for the brd discard handling from Bart.

   - Update the maintainers entry for cciss, since that hardware has
     transferred ownership.

   - Three bug fixes for bcache from Eric Wheeler.

   - Set of fixes for xen-blk{back,front} from Jan and Konrad.

   - Removal of the cpqarray driver.  It has been disabled in Kconfig
     since 2013, and we were initially scheduled to remove it in 3.15.

   - Various updates and fixes for NVMe, with the most important being:

        - Removal of the per-device NVMe thread, replacing that with a
          watchdog timer instead. From Christoph.

        - Exposing the namespace WWID through sysfs, from Keith.

        - Set of cleanups from Ming Lin.

        - Logging the controller device name instead of the underlying
          PCI device name, from Sagi.

        - And a bunch of fixes and optimizations from the usual suspects
          in this area"

* 'for-4.6/drivers' of git://git.kernel.dk/linux-block: (49 commits)
  NVMe: Expose ns wwid through single sysfs entry
  drivers:block: cpqarray clean up
  brd: Fix discard request processing
  cpqarray: remove it from the kernel
  cciss: update MAINTAINERS
  NVMe: Remove unused sq_head read in completion path
  bcache: fix cache_set_flush() NULL pointer dereference on OOM
  bcache: cleaned up error handling around register_cache()
  bcache: fix race of writeback thread starting before complete initialization
  NVMe: Create discard zero quirk white list
  nbd: use correct div_s64 helper
  mtip32xx: remove unneeded variable in mtip_cmd_timeout()
  lightnvm: generalize rrpc ppa calculations
  lightnvm: remove struct nvm_dev->total_blocks
  lightnvm: rename ->nr_pages to ->nr_sects
  lightnvm: update closed list outside of intr context
  xen/blback: Fit the important information of the thread in 17 characters
  lightnvm: fold get bb tbl when using dual/quad plane mode
  lightnvm: fix up nonsensical configure overrun checking
  xen-blkback: advertise indirect segment support earlier
  ...

1  2 
MAINTAINERS
drivers/block/xen-blkfront.c
drivers/lightnvm/core.c
drivers/lightnvm/rrpc.c
drivers/lightnvm/rrpc.h
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/lightnvm.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
include/linux/lightnvm.h

diff --combined MAINTAINERS
index 15b4c417211ffcb00d48fb04fa1ff28324f86c75,e1d64419f741ea3c24e351bf64e16b595f2e2dfb..1c6d7781812ef27f219a5f9c17ca2caf0bb4c5c6
@@@ -223,7 -223,9 +223,7 @@@ F: drivers/scsi/aacraid
  
  ABI/API
  L:    linux-api@vger.kernel.org
 -F:    Documentation/ABI/
  F:    include/linux/syscalls.h
 -F:    include/uapi/
  F:    kernel/sys_ni.c
  
  ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
@@@ -238,12 -240,6 +238,12 @@@ L:       lm-sensors@lm-sensors.or
  S:    Maintained
  F:    drivers/hwmon/abituguru3.c
  
 +ACCES 104-DIO-48E GPIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-104-dio-48e.c
 +
  ACCES 104-IDI-48 GPIO DRIVER
  M:    "William Breathitt Gray" <vilhelm.gray@gmail.com>
  L:    linux-gpio@vger.kernel.org
@@@ -690,6 -686,13 +690,6 @@@ M:        Michael Hanselmann <linux-kernel@han
  S:    Supported
  F:    drivers/macintosh/ams/
  
 -AMSO1100 RNIC DRIVER
 -M:    Tom Tucker <tom@opengridcomputing.com>
 -M:    Steve Wise <swise@opengridcomputing.com>
 -L:    linux-rdma@vger.kernel.org
 -S:    Maintained
 -F:    drivers/infiniband/hw/amso1100/
 -
  ANALOG DEVICES INC AD9389B DRIVER
  M:    Hans Verkuil <hans.verkuil@cisco.com>
  L:    linux-media@vger.kernel.org
@@@ -775,12 -778,6 +775,12 @@@ L:       alsa-devel@alsa-project.org (moderat
  S:    Maintained
  F:    sound/aoa/
  
 +APEX EMBEDDED SYSTEMS STX104 DAC DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/dac/stx104.c
 +
  APM DRIVER
  M:    Jiri Kosina <jikos@kernel.org>
  S:    Odd fixes
@@@ -932,24 -929,17 +932,24 @@@ M:      Emilio López <emilio@elopez.com.ar
  S:    Maintained
  F:    drivers/clk/sunxi/
  
 -ARM/Amlogic MesonX SoC support
 +ARM/Amlogic Meson SoC support
  M:    Carlo Caione <carlo@caione.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +L:    linux-meson@googlegroups.com
 +W:    http://linux-meson.com/
  S:    Maintained
 -F:    drivers/media/rc/meson-ir.c
 -N:    meson[x68]
 +F:    arch/arm/mach-meson/
 +F:    arch/arm/boot/dts/meson*
 +N:    meson
  
  ARM/Annapurna Labs ALPINE ARCHITECTURE
  M:    Tsahee Zidenberg <tsahee@annapurnalabs.com>
 +M:    Antoine Tenart <antoine.tenart@free-electrons.com>
  S:    Maintained
  F:    arch/arm/mach-alpine/
 +F:    arch/arm/boot/dts/alpine*
 +F:    arch/arm64/boot/dts/al/
 +F:    drivers/*/*alpine*
  
  ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
@@@ -977,8 -967,6 +977,8 @@@ M: Rob Herring <robh@kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/mach-highbank/
 +F:    arch/arm/boot/dts/highbank.dts
 +F:    arch/arm/boot/dts/ecx-*.dts*
  
  ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
  M:    Krzysztof Halasa <khalasa@piap.pl>
@@@ -1054,7 -1042,6 +1054,7 @@@ M:      Barry Song <baohua@kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
  S:    Maintained
 +F:    arch/arm/boot/dts/prima2*
  F:    arch/arm/mach-prima2/
  F:    drivers/clk/sirf/
  F:    drivers/clocksource/timer-prima2.c
@@@ -1156,10 -1143,6 +1156,10 @@@ W:    http://www.hisilicon.co
  S:    Supported
  T:    git git://github.com/hisilicon/linux-hisi.git
  F:    arch/arm/mach-hisi/
 +F:    arch/arm/boot/dts/hi3*
 +F:    arch/arm/boot/dts/hip*
 +F:    arch/arm/boot/dts/hisi*
 +F:    arch/arm64/boot/dts/hisilicon/
  
  ARM/HP JORNADA 7XX MACHINE SUPPORT
  M:    Kristoffer Ericson <kristoffer.ericson@gmail.com>
@@@ -1236,7 -1219,6 +1236,7 @@@ M:      Santosh Shilimkar <ssantosh@kernel.o
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/mach-keystone/
 +F:    arch/arm/boot/dts/k2*
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
  
  ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
@@@ -1305,7 -1287,6 +1305,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  S:    Maintained
  F:    arch/arm/mach-berlin/
  F:    arch/arm/boot/dts/berlin*
 +F:    arch/arm64/boot/dts/marvell/berlin*
  
  
  ARM/Marvell Dove/MV78xx0/Orion SOC support
@@@ -1444,7 -1425,6 +1444,7 @@@ S:      Maintaine
  F:    arch/arm/boot/dts/qcom-*.dts
  F:    arch/arm/boot/dts/qcom-*.dtsi
  F:    arch/arm/mach-qcom/
 +F:    arch/arm64/boot/dts/qcom/*
  F:    drivers/soc/qcom/
  F:    drivers/tty/serial/msm_serial.h
  F:    drivers/tty/serial/msm_serial.c
@@@ -1461,8 -1441,8 +1461,8 @@@ S:      Maintaine
  ARM/RENESAS ARM64 ARCHITECTURE
  M:    Simon Horman <horms@verge.net.au>
  M:    Magnus Damm <magnus.damm@gmail.com>
 -L:    linux-sh@vger.kernel.org
 -Q:    http://patchwork.kernel.org/project/linux-sh/list/
 +L:    linux-renesas-soc@vger.kernel.org
 +Q:    http://patchwork.kernel.org/project/linux-renesas-soc/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
  S:    Supported
  F:    arch/arm64/boot/dts/renesas/
@@@ -1504,8 -1484,6 +1504,8 @@@ L:      linux-arm-kernel@lists.infradead.or
  L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/boot/dts/s3c*
 +F:    arch/arm/boot/dts/s5p*
 +F:    arch/arm/boot/dts/samsung*
  F:    arch/arm/boot/dts/exynos*
  F:    arch/arm64/boot/dts/exynos/
  F:    arch/arm/plat-samsung/
@@@ -1585,7 -1563,6 +1585,7 @@@ S:      Maintaine
  F:    arch/arm/mach-socfpga/
  F:    arch/arm/boot/dts/socfpga*
  F:    arch/arm/configs/socfpga_defconfig
 +F:    arch/arm64/boot/dts/altera/
  W:    http://www.rocketboards.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
  
@@@ -1739,7 -1716,7 +1739,7 @@@ M:      Lorenzo Pieralisi <lorenzo.pieralisi
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/boot/dts/vexpress*
 -F:    arch/arm64/boot/dts/arm/vexpress*
 +F:    arch/arm64/boot/dts/arm/
  F:    arch/arm/mach-vexpress/
  F:    */*/vexpress*
  F:    */*/*/vexpress*
@@@ -1968,12 -1945,6 +1968,12 @@@ M:    Nicolas Ferre <nicolas.ferre@atmel.c
  S:    Supported
  F:    drivers/tty/serial/atmel_serial.c
  
 +ATMEL SAMA5D2 ADC DRIVER
 +M:    Ludovic Desroches <ludovic.desroches@atmel.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Supported
 +F:    drivers/iio/adc/at91-sama5d2_adc.c
 +
  ATMEL Audio ALSA driver
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
@@@ -2372,7 -2343,6 +2372,7 @@@ F:      arch/arm/mach-bcm
  F:    arch/arm/boot/dts/bcm113*
  F:    arch/arm/boot/dts/bcm216*
  F:    arch/arm/boot/dts/bcm281*
 +F:    arch/arm64/boot/dts/broadcom/
  F:    arch/arm/configs/bcm_defconfig
  F:    drivers/mmc/host/sdhci-bcm-kona.c
  F:    drivers/clocksource/bcm_kona_timer.c
@@@ -2387,6 -2357,14 +2387,6 @@@ T:     git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  N:    bcm2835
  
 -BROADCOM BCM33XX MIPS ARCHITECTURE
 -M:    Kevin Cernekee <cernekee@gmail.com>
 -L:    linux-mips@linux-mips.org
 -S:    Maintained
 -F:    arch/mips/bcm3384/*
 -F:    arch/mips/include/asm/mach-bcm3384/*
 -F:    arch/mips/kernel/*bmips*
 -
  BROADCOM BCM47XX MIPS ARCHITECTURE
  M:    Hauke Mehrtens <hauke@hauke-m.de>
  M:    Rafał Miłecki <zajec5@gmail.com>
@@@ -2440,7 -2418,6 +2440,7 @@@ F:      arch/mips/bmips/
  F:    arch/mips/include/asm/mach-bmips/*
  F:    arch/mips/kernel/*bmips*
  F:    arch/mips/boot/dts/brcm/bcm*.dts*
 +F:    drivers/irqchip/irq-bcm63*
  F:    drivers/irqchip/irq-bcm7*
  F:    drivers/irqchip/irq-brcmstb*
  F:    include/linux/bcm963xx_nvram.h
@@@ -3468,8 -3445,9 +3468,8 @@@ S:      Maintaine
  F:    drivers/usb/dwc2/
  
  DESIGNWARE USB3 DRD IP DRIVER
 -M:    Felipe Balbi <balbi@ti.com>
 +M:    Felipe Balbi <balbi@kernel.org>
  L:    linux-usb@vger.kernel.org
 -L:    linux-omap@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
  S:    Maintained
  F:    drivers/usb/dwc3/
@@@ -3560,6 -3538,13 +3560,6 @@@ L:     driverdev-devel@linuxdriverproject.o
  S:    Maintained
  F:    drivers/staging/dgnc/
  
 -DIGI EPCA PCI PRODUCTS
 -M:    Lidza Louina <lidza.louina@gmail.com>
 -M:    Daeseok Youn <daeseok.youn@gmail.com>
 -L:    driverdev-devel@linuxdriverproject.org
 -S:    Maintained
 -F:    drivers/staging/dgap/
 -
  DIOLAN U2C-12 I2C DRIVER
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-i2c@vger.kernel.org
@@@ -4199,6 -4184,13 +4199,6 @@@ W:     http://aeschi.ch.eu.org/efs
  S:    Orphan
  F:    fs/efs/
  
 -EHCA (IBM GX bus InfiniBand adapter) DRIVER
 -M:    Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 -M:    Christoph Raisch <raisch@de.ibm.com>
 -L:    linux-rdma@vger.kernel.org
 -S:    Supported
 -F:    drivers/infiniband/hw/ehca/
 -
  EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
  M:    Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
  L:    netdev@vger.kernel.org
@@@ -4530,12 -4522,6 +4530,12 @@@ L:    linuxppc-dev@lists.ozlabs.or
  S:    Maintained
  F:    drivers/dma/fsldma.*
  
 +FREESCALE GPMI NAND DRIVER
 +M:    Han Xu <han.xu@nxp.com>
 +L:    linux-mtd@lists.infradead.org
 +S:    Maintained
 +F:    drivers/mtd/nand/gpmi-nand/*
 +
  FREESCALE I2C CPM DRIVER
  M:    Jochen Friedrich <jochen@scram.de>
  L:    linuxppc-dev@lists.ozlabs.org
@@@ -4552,7 -4538,7 +4552,7 @@@ F:      include/linux/platform_data/video-im
  F:    drivers/video/fbdev/imxfb.c
  
  FREESCALE QUAD SPI DRIVER
 -M:    Han Xu <han.xu@freescale.com>
 +M:    Han Xu <han.xu@nxp.com>
  L:    linux-mtd@lists.infradead.org
  S:    Maintained
  F:    drivers/mtd/spi-nor/fsl-quadspi.c
@@@ -4566,15 -4552,6 +4566,15 @@@ S:    Maintaine
  F:    drivers/net/ethernet/freescale/fs_enet/
  F:    include/linux/fs_enet_pd.h
  
 +FREESCALE IMX / MXC FEC DRIVER
 +M:    Fugang Duan <fugang.duan@nxp.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/ethernet/freescale/fec_main.c
 +F:    drivers/net/ethernet/freescale/fec_ptp.c
 +F:    drivers/net/ethernet/freescale/fec.h
 +F:    Documentation/devicetree/bindings/net/fsl-fec.txt
 +
  FREESCALE QUICC ENGINE LIBRARY
  L:    linuxppc-dev@lists.ozlabs.org
  S:    Orphan
@@@ -4838,14 -4815,10 +4838,14 @@@ L:   linux-gpio@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
  S:    Maintained
  F:    Documentation/gpio/
 +F:    Documentation/ABI/testing/gpio-cdev
 +F:    Documentation/ABI/obsolete/sysfs-gpio
  F:    drivers/gpio/
  F:    include/linux/gpio/
  F:    include/linux/gpio.h
  F:    include/asm-generic/gpio.h
 +F:    include/uapi/linux/gpio.h
 +F:    tools/gpio/
  
  GRE DEMULTIPLEXER DRIVER
  M:    Dmitry Kozlov <xeb@mail.ru>
@@@ -4994,7 -4967,6 +4994,7 @@@ F:      include/linux/hw_random.
  
  HARDWARE SPINLOCK CORE
  M:    Ohad Ben-Cohen <ohad@wizery.com>
 +M:    Bjorn Andersson <bjorn.andersson@linaro.org>
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
  F:    Documentation/hwspinlock.txt
@@@ -5016,16 -4988,10 +5016,10 @@@ T:   git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/dvb-frontends/hd29l2*
  
- HEWLETT-PACKARD SMART2 RAID DRIVER
- L:    iss_storagedev@hp.com
- S:    Orphan
- F:    Documentation/blockdev/cpqarray.txt
- F:    drivers/block/cpqarray.*
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
 -M:    Don Brace <don.brace@pmcs.com>
 +M:    Don Brace <don.brace@microsemi.com>
  L:    iss_storagedev@hp.com
 -L:    storagedev@pmcs.com
 +L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/scsi/hpsa.txt
@@@ -5034,9 -5000,9 +5028,9 @@@ F:      include/linux/cciss*.
  F:    include/uapi/linux/cciss*.h
  
  HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
- M:    Don Brace <don.brace@pmcs.com>
+ M:    Don Brace <don.brace@microsemi.com>
  L:    iss_storagedev@hp.com
- L:    storagedev@pmcs.com
+ L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/blockdev/cciss.txt
@@@ -5221,7 -5187,6 +5215,7 @@@ F:      arch/x86/kernel/cpu/mshyperv.
  F:    drivers/hid/hid-hyperv.c
  F:    drivers/hv/
  F:    drivers/input/serio/hyperv-keyboard.c
 +F:    drivers/pci/host/pci-hyperv.c
  F:    drivers/net/hyperv/
  F:    drivers/scsi/storvsc_drv.c
  F:    drivers/video/fbdev/hyperv_fb.c
@@@ -5586,7 -5551,6 +5580,7 @@@ F:      drivers/input
  F:    include/linux/input.h
  F:    include/uapi/linux/input.h
  F:    include/linux/input/
 +F:    Documentation/devicetree/bindings/input/
  
  INPUT MULTITOUCH (MT) PROTOCOL
  M:    Henrik Rydberg <rydberg@bitmath.org>
@@@ -5781,7 -5745,6 +5775,7 @@@ S:      Supporte
  F:    include/uapi/linux/mei.h
  F:    include/linux/mei_cl_bus.h
  F:    drivers/misc/mei/*
 +F:    drivers/watchdog/mei_wdt.c
  F:    Documentation/misc-devices/mei/*
  
  INTEL MIC DRIVERS (mic)
@@@ -5840,6 -5803,12 +5834,6 @@@ M:     Juanjo Ciarlante <jjciarla@raiz.uncu
  S:    Maintained
  F:    net/ipv4/netfilter/ipt_MASQUERADE.c
  
 -IPATH DRIVER
 -M:    Mike Marciniszyn <infinipath@intel.com>
 -L:    linux-rdma@vger.kernel.org
 -S:    Maintained
 -F:    drivers/staging/rdma/ipath/
 -
  IPMI SUBSYSTEM
  M:    Corey Minyard <minyard@acm.org>
  L:    openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@@ -6084,7 -6053,7 +6078,7 @@@ S:      Maintaine
  F:    drivers/media/platform/rcar_jpu.c
  
  JSM Neo PCI based serial card
 -M:    Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 +M:    Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
  L:    linux-serial@vger.kernel.org
  S:    Maintained
  F:    drivers/tty/serial/jsm/
@@@ -6169,7 -6138,7 +6163,7 @@@ F:      include/uapi/linux/sunrpc
  
  KERNEL SELFTEST FRAMEWORK
  M:    Shuah Khan <shuahkh@osg.samsung.com>
 -L:    linux-api@vger.kernel.org
 +L:    linux-kselftest@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/shuah/linux-kselftest
  S:    Maintained
  F:    tools/testing/selftests
@@@ -6602,10 -6571,9 +6596,10 @@@ F:    drivers/platform/x86/hp_accel.
  
  LIVE PATCHING
  M:    Josh Poimboeuf <jpoimboe@redhat.com>
 -M:    Seth Jennings <sjenning@redhat.com>
 +M:    Jessica Yu <jeyu@redhat.com>
  M:    Jiri Kosina <jikos@kernel.org>
 -M:    Vojtech Pavlik <vojtech@suse.com>
 +M:    Miroslav Benes <mbenes@suse.cz>
 +R:    Petr Mladek <pmladek@suse.com>
  S:    Maintained
  F:    kernel/livepatch/
  F:    include/linux/livepatch.h
@@@ -6616,11 -6584,6 +6610,11 @@@ F:    samples/livepatch
  L:    live-patching@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
  
 +LINUX KERNEL DUMP TEST MODULE (LKDTM)
 +M:    Kees Cook <keescook@chromium.org>
 +S:    Maintained
 +F:    drivers/misc/lkdtm.c
 +
  LLC (802.2)
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  S:    Maintained
@@@ -6706,12 -6669,13 +6700,12 @@@ S:   Maintaine
  F:    arch/arm/mach-lpc32xx/
  
  LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 -M:    Nagalakshmi Nandigama <nagalakshmi.nandigama@avagotech.com>
 -M:    Praveen Krishnamoorthy <praveen.krishnamoorthy@avagotech.com>
 -M:    Sreekanth Reddy <sreekanth.reddy@avagotech.com>
 -M:    Abhijit Mahajan <abhijit.mahajan@avagotech.com>
 -L:    MPT-FusionLinux.pdl@avagotech.com
 +M:    Sathya Prakash <sathya.prakash@broadcom.com>
 +M:    Chaitra P B <chaitra.basappa@broadcom.com>
 +M:    Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
 +L:    MPT-FusionLinux.pdl@broadcom.com
  L:    linux-scsi@vger.kernel.org
 -W:    http://www.lsilogic.com/support
 +W:    http://www.avagotech.com/support/
  S:    Supported
  F:    drivers/message/fusion/
  F:    drivers/scsi/mpt2sas/
@@@ -6804,7 -6768,6 +6798,7 @@@ S:      Maintaine
  F:    Documentation/networking/mac80211-injection.txt
  F:    include/net/mac80211.h
  F:    net/mac80211/
 +F:    drivers/net/wireless/mac80211_hwsim.[ch]
  
  MACVLAN DRIVER
  M:    Patrick McHardy <kaber@trash.net>
@@@ -6934,7 -6897,7 +6928,7 @@@ MAXIM MAX77802 MULTIFUNCTION PMIC DEVIC
  M:    Javier Martinez Canillas <javier@osg.samsung.com>
  L:    linux-kernel@vger.kernel.org
  S:    Supported
 -F:    drivers/*/*max77802.c
 +F:    drivers/*/*max77802*.c
  F:    Documentation/devicetree/bindings/*/*max77802.txt
  F:    include/dt-bindings/*/*max77802.h
  
@@@ -6944,7 -6907,7 +6938,7 @@@ M:      Krzysztof Kozlowski <k.kozlowski@sam
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  F:    drivers/*/max14577.c
 -F:    drivers/*/max77686.c
 +F:    drivers/*/max77686*.c
  F:    drivers/*/max77693.c
  F:    drivers/extcon/extcon-max14577.c
  F:    drivers/extcon/extcon-max77693.c
@@@ -7260,8 -7223,10 +7254,8 @@@ L:     linux-media@vger.kernel.or
  W:    https://linuxtv.org
  W:    http://palosaari.fi/linux/
  Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 -T:    git git://linuxtv.org/anttip/media_tree.git
  S:    Maintained
 -F:    drivers/staging/media/mn88473/
 -F:    drivers/media/dvb-frontends/mn88473.h
 +F:    drivers/media/dvb-frontends/mn88473*
  
  MODULE SUPPORT
  M:    Rusty Russell <rusty@rustcorp.com.au>
@@@ -7399,7 -7364,7 +7393,7 @@@ F:      drivers/tty/isicom.
  F:    include/linux/isicom.h
  
  MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
 -M:    Felipe Balbi <balbi@ti.com>
 +M:    Bin Liu <b-liu@ti.com>
  L:    linux-usb@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
  S:    Maintained
@@@ -7422,17 -7387,6 +7416,17 @@@ W:    https://www.myricom.com/support/down
  S:    Supported
  F:    drivers/net/ethernet/myricom/myri10ge/
  
 +NAND FLASH SUBSYSTEM
 +M:    Boris Brezillon <boris.brezillon@free-electrons.com>
 +R:    Richard Weinberger <richard@nod.at>
 +L:    linux-mtd@lists.infradead.org
 +W:    http://www.linux-mtd.infradead.org/
 +Q:    http://patchwork.ozlabs.org/project/linux-mtd/list/
 +T:    git git://github.com/linux-nand/linux.git
 +S:    Maintained
 +F:    drivers/mtd/nand/
 +F:    include/linux/mtd/nand*.h
 +
  NATSEMI ETHERNET DRIVER (DP8381x)
  S:    Orphan
  F:    drivers/net/ethernet/natsemi/natsemi.c
@@@ -7742,13 -7696,13 +7736,13 @@@ S:   Maintaine
  F:    arch/nios2/
  
  NOKIA N900 POWER SUPPLY DRIVERS
 -M:    Pali Rohár <pali.rohar@gmail.com>
 -S:    Maintained
 +R:    Pali Rohár <pali.rohar@gmail.com>
  F:    include/linux/power/bq2415x_charger.h
  F:    include/linux/power/bq27xxx_battery.h
  F:    include/linux/power/isp1704_charger.h
  F:    drivers/power/bq2415x_charger.c
  F:    drivers/power/bq27xxx_battery.c
 +F:    drivers/power/bq27xxx_battery_i2c.c
  F:    drivers/power/isp1704_charger.c
  F:    drivers/power/rx51_battery.c
  
@@@ -7979,9 -7933,11 +7973,9 @@@ F:     drivers/media/platform/omap3isp
  F:    drivers/staging/media/omap4iss/
  
  OMAP USB SUPPORT
 -M:    Felipe Balbi <balbi@ti.com>
  L:    linux-usb@vger.kernel.org
  L:    linux-omap@vger.kernel.org
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 -S:    Maintained
 +S:    Orphan
  F:    drivers/usb/*/*omap*
  F:    arch/arm/*omap*/usb*
  
@@@ -8207,13 -8163,6 +8201,13 @@@ S:    Maintaine
  F:    Documentation/mn10300/
  F:    arch/mn10300/
  
 +PARALLEL LCD/KEYPAD PANEL DRIVER
 +M:      Willy Tarreau <willy@haproxy.com>
 +M:      Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 +S:      Odd Fixes
 +F:      Documentation/misc-devices/lcd-panel-cgram.txt
 +F:      drivers/misc/panel.c
 +
  PARALLEL PORT SUBSYSTEM
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
  M:    Sudip Mukherjee <sudip@vectorindia.org>
@@@ -8412,20 -8361,12 +8406,20 @@@ L:   linux-pci@vger.kernel.or
  S:    Maintained
  F:    drivers/pci/host/*designware*
  
 +PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
 +M:    Joao Pinto <jpinto@synopsys.com>
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/designware-pcie.txt
 +F:    drivers/pci/host/pcie-designware-plat.c
 +
  PCI DRIVER FOR GENERIC OF HOSTS
  M:    Will Deacon <will.deacon@arm.com>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/devicetree/bindings/pci/host-generic-pci.txt
 +F:    drivers/pci/host/pci-host-common.c
  F:    drivers/pci/host/pci-host-generic.c
  
  PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
@@@ -8471,14 -8412,6 +8465,14 @@@ L:     linux-arm-msm@vger.kernel.or
  S:     Maintained
  F:     drivers/pci/host/*qcom*
  
 +PCIE DRIVER FOR CAVIUM THUNDERX
 +M:    David Daney <david.daney@cavium.com>
 +L:    linux-pci@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Supported
 +F:    Documentation/devicetree/bindings/pci/pci-thunder-*
 +F:    drivers/pci/host/pci-thunder-*
 +
  PCMCIA SUBSYSTEM
  P:    Linux PCMCIA Team
  L:    linux-pcmcia@lists.infradead.org
@@@ -8521,7 -8454,6 +8515,7 @@@ PERFORMANCE EVENTS SUBSYSTE
  M:    Peter Zijlstra <peterz@infradead.org>
  M:    Ingo Molnar <mingo@redhat.com>
  M:    Arnaldo Carvalho de Melo <acme@kernel.org>
 +R:    Alexander Shishkin <alexander.shishkin@linux.intel.com>
  L:    linux-kernel@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
  S:    Supported
@@@ -8880,7 -8812,6 +8874,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  T:    git git://github.com/hzhuang1/linux.git
  T:    git git://github.com/rjarzmik/linux.git
  S:    Maintained
 +F:    arch/arm/boot/dts/pxa*
  F:    arch/arm/mach-pxa/
  F:    drivers/dma/pxa*
  F:    drivers/pcmcia/pxa2xx*
@@@ -8910,7 -8841,6 +8904,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  T:    git git://github.com/hzhuang1/linux.git
  T:    git git://git.linaro.org/people/ycmiao/pxa-linux.git
  S:    Maintained
 +F:    arch/arm/boot/dts/mmp*
  F:    arch/arm/mach-mmp/
  
  PXA MMCI DRIVER
@@@ -9200,7 -9130,6 +9194,7 @@@ F:      include/linux/regmap.
  
  REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
  M:    Ohad Ben-Cohen <ohad@wizery.com>
 +M:    Bjorn Andersson <bjorn.andersson@linaro.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
  S:    Maintained
  F:    drivers/remoteproc/
@@@ -9209,7 -9138,6 +9203,7 @@@ F:      include/linux/remoteproc.
  
  REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
  M:    Ohad Ben-Cohen <ohad@wizery.com>
 +M:    Bjorn Andersson <bjorn.andersson@linaro.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
  S:    Maintained
  F:    drivers/rpmsg/
@@@ -9638,12 -9566,6 +9632,12 @@@ M:    Andreas Noever <andreas.noever@gmail
  S:    Maintained
  F:    drivers/thunderbolt/
  
 +TI BQ27XXX POWER SUPPLY DRIVER
 +R:    Andrew F. Davis <afd@ti.com>
 +F:    include/linux/power/bq27xxx_battery.h
 +F:    drivers/power/bq27xxx_battery.c
 +F:    drivers/power/bq27xxx_battery_i2c.c
 +
  TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
  M:    John Stultz <john.stultz@linaro.org>
  M:    Thomas Gleixner <tglx@linutronix.de>
@@@ -9721,7 -9643,7 +9715,7 @@@ F:      drivers/scsi/sg.
  F:    include/scsi/sg.h
  
  SCSI SUBSYSTEM
 -M:    "James E.J. Bottomley" <JBottomley@odin.com>
 +M:    "James E.J. Bottomley" <jejb@linux.vnet.ibm.com>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
  M:    "Martin K. Petersen" <martin.petersen@oracle.com>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
@@@ -9865,11 -9787,10 +9859,11 @@@ S:   Supporte
  F:    drivers/scsi/be2iscsi/
  
  Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
 -M:    Sathya Perla <sathya.perla@avagotech.com>
 -M:    Ajit Khaparde <ajit.khaparde@avagotech.com>
 -M:    Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
 -M:    Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
 +M:    Sathya Perla <sathya.perla@broadcom.com>
 +M:    Ajit Khaparde <ajit.khaparde@broadcom.com>
 +M:    Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
 +M:    Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
 +M:    Somnath Kotur <somnath.kotur@broadcom.com>
  L:    netdev@vger.kernel.org
  W:    http://www.emulex.com
  S:    Supported
@@@ -10231,7 -10152,6 +10225,7 @@@ S:   Supporte
  F:    drivers/media/pci/solo6x10/
  
  SOFTWARE RAID (Multiple Disks) SUPPORT
 +M:    Shaohua Li <shli@kernel.org>
  L:    linux-raid@vger.kernel.org
  T:    git git://neil.brown.name/md
  S:    Supported
@@@ -10247,7 -10167,7 +10241,7 @@@ F:   drivers/net/ethernet/natsemi/sonic.
  
  SONICS SILICON BACKPLANE DRIVER (SSB)
  M:    Michael Buesch <m@bues.ch>
 -L:    netdev@vger.kernel.org
 +L:    linux-wireless@vger.kernel.org
  S:    Maintained
  F:    drivers/ssb/
  F:    include/linux/ssb/
@@@ -10365,7 -10285,6 +10359,7 @@@ L:   spear-devel@list.st.co
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  W:    http://www.st.com/spear
  S:    Maintained
 +F:    arch/arm/boot/dts/spear*
  F:    arch/arm/mach-spear/
  
  SPEAR CLOCK FRAMEWORK SUPPORT
@@@ -10467,6 -10386,19 +10461,6 @@@ L:  linux-tegra@vger.kernel.or
  S:    Maintained
  F:    drivers/staging/nvec/
  
 -STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
 -M:    Jens Frederich <jfrederich@gmail.com>
 -M:    Daniel Drake <dsd@laptop.org>
 -M:    Jon Nettleton <jon.nettleton@gmail.com>
 -W:    http://wiki.laptop.org/go/DCON
 -S:    Maintained
 -F:    drivers/staging/olpc_dcon/
 -
 -STAGING - PARALLEL LCD/KEYPAD PANEL DRIVER
 -M:    Willy Tarreau <willy@meta-x.org>
 -S:    Odd Fixes
 -F:    drivers/staging/panel/
 -
  STAGING - REALTEK RTL8712U DRIVERS
  M:    Larry Finger <Larry.Finger@lwfinger.net>
  M:    Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
@@@ -10915,14 -10847,6 +10909,14 @@@ L: linux-omap@vger.kernel.or
  S:    Maintained
  F:    drivers/thermal/ti-soc-thermal/
  
 +TI VPE/CAL DRIVERS
 +M:    Benoit Parrot <bparrot@ti.com>
 +L:    linux-media@vger.kernel.org
 +W:    http://linuxtv.org/
 +Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 +S:    Maintained
 +F:    drivers/media/platform/ti-vpe/
 +
  TI CDCE706 CLOCK DRIVER
  M:    Max Filippov <jcmvbkbc@gmail.com>
  S:    Maintained
@@@ -11146,8 -11070,8 +11140,8 @@@ M:   Jarkko Sakkinen <jarkko.sakkinen@lin
  R:    Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
  W:    http://tpmdd.sourceforge.net
  L:    tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
 -Q:    git git://github.com/PeterHuewe/linux-tpmdd.git
 -T:    git https://github.com/PeterHuewe/linux-tpmdd
 +Q:    https://patchwork.kernel.org/project/tpmdd-devel/list/
 +T:    git git://git.infradead.org/users/jjs/linux-tpmdd.git
  S:    Maintained
  F:    drivers/char/tpm/
  
@@@ -11327,7 -11251,7 +11321,7 @@@ F:   include/linux/mtd/ubi.
  F:    include/uapi/mtd/ubi-user.h
  
  USB ACM DRIVER
 -M:    Oliver Neukum <oliver@neukum.org>
 +M:    Oliver Neukum <oneukum@suse.com>
  L:    linux-usb@vger.kernel.org
  S:    Maintained
  F:    Documentation/usb/acm.txt
@@@ -11388,7 -11312,7 +11382,7 @@@ F:   Documentation/usb/ehci.tx
  F:    drivers/usb/host/ehci*
  
  USB GADGET/PERIPHERAL SUBSYSTEM
 -M:    Felipe Balbi <balbi@ti.com>
 +M:    Felipe Balbi <balbi@kernel.org>
  L:    linux-usb@vger.kernel.org
  W:    http://www.linux-usb.org/gadget
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@@ -11451,7 -11375,6 +11445,7 @@@ M:   Valentina Manea <valentina.manea.m@g
  M:    Shuah Khan <shuah.kh@samsung.com>
  L:    linux-usb@vger.kernel.org
  S:    Maintained
 +F:    Documentation/usb/usbip_protocol.txt
  F:    drivers/usb/usbip/
  F:    tools/usb/usbip/
  
@@@ -11465,7 -11388,7 +11459,7 @@@ S:   Maintaine
  F:    drivers/net/usb/pegasus.*
  
  USB PHY LAYER
 -M:    Felipe Balbi <balbi@ti.com>
 +M:    Felipe Balbi <balbi@kernel.org>
  L:    linux-usb@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
  S:    Maintained
@@@ -11942,12 -11865,6 +11936,12 @@@ M: David Härdeman <david@hardeman.nu
  S:    Maintained
  F:    drivers/media/rc/winbond-cir.c
  
 +WINSYSTEMS WS16C48 GPIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-ws16c48.c
 +
  WIMAX STACK
  M:    Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
  M:    linux-wimax@intel.com
@@@ -12101,6 -12018,7 +12095,6 @@@ F:   arch/arm64/xen
  F:    arch/arm64/include/asm/xen/
  
  XEN NETWORK BACKEND DRIVER
 -M:    Ian Campbell <ian.campbell@citrix.com>
  M:    Wei Liu <wei.liu2@citrix.com>
  L:    xen-devel@lists.xenproject.org (moderated for non-subscribers)
  L:    netdev@vger.kernel.org
@@@ -12209,7 -12127,7 +12203,7 @@@ F:   drivers/net/hamradio/*scc.
  F:    drivers/net/hamradio/z8530.h
  
  ZBUD COMPRESSED PAGE ALLOCATOR
 -M:    Seth Jennings <sjennings@variantweb.net>
 +M:    Seth Jennings <sjenning@redhat.com>
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    mm/zbud.c
@@@ -12264,7 -12182,7 +12258,7 @@@ F:   include/linux/zsmalloc.
  F:    Documentation/vm/zsmalloc.txt
  
  ZSWAP COMPRESSED SWAP CACHING
 -M:    Seth Jennings <sjennings@variantweb.net>
 +M:    Seth Jennings <sjenning@redhat.com>
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    mm/zswap.c
index 83eb9e6bf8b06673640ff5d5ef05db1555e4912f,008121bdece15f5bb73a580b7c155d9f3bcc35f0..6405b65577926876587300e535490badf71f3414
@@@ -125,8 -125,10 +125,10 @@@ static const struct block_device_operat
   */
  
  static unsigned int xen_blkif_max_segments = 32;
- module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
- MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
+ module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
+                  S_IRUGO);
+ MODULE_PARM_DESC(max_indirect_segments,
+                "Maximum amount of segments in indirect requests (default is 32)");
  
  static unsigned int xen_blkif_max_queues = 4;
  module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
@@@ -1873,43 -1875,6 +1875,43 @@@ again
        return err;
  }
  
 +static int negotiate_mq(struct blkfront_info *info)
 +{
 +      unsigned int backend_max_queues = 0;
 +      int err;
 +      unsigned int i;
 +
 +      BUG_ON(info->nr_rings);
 +
 +      /* Check if backend supports multiple queues. */
 +      err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
 +                         "multi-queue-max-queues", "%u", &backend_max_queues);
 +      if (err < 0)
 +              backend_max_queues = 1;
 +
 +      info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
 +      /* We need at least one ring. */
 +      if (!info->nr_rings)
 +              info->nr_rings = 1;
 +
 +      info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
 +      if (!info->rinfo) {
 +              xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
 +              return -ENOMEM;
 +      }
 +
 +      for (i = 0; i < info->nr_rings; i++) {
 +              struct blkfront_ring_info *rinfo;
 +
 +              rinfo = &info->rinfo[i];
 +              INIT_LIST_HEAD(&rinfo->indirect_pages);
 +              INIT_LIST_HEAD(&rinfo->grants);
 +              rinfo->dev_info = info;
 +              INIT_WORK(&rinfo->work, blkif_restart_queue);
 +              spin_lock_init(&rinfo->ring_lock);
 +      }
 +      return 0;
 +}
  /**
   * Entry point to this code when a new device is created.  Allocate the basic
   * structures and the ring buffer for communication with the backend, and
@@@ -1920,7 -1885,9 +1922,7 @@@ static int blkfront_probe(struct xenbus
                          const struct xenbus_device_id *id)
  {
        int err, vdevice;
 -      unsigned int r_index;
        struct blkfront_info *info;
 -      unsigned int backend_max_queues = 0;
  
        /* FIXME: Use dynamic device id if this is not set. */
        err = xenbus_scanf(XBT_NIL, dev->nodename,
        }
  
        info->xbdev = dev;
 -      /* Check if backend supports multiple queues. */
 -      err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
 -                         "multi-queue-max-queues", "%u", &backend_max_queues);
 -      if (err < 0)
 -              backend_max_queues = 1;
 -
 -      info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
 -      /* We need at least one ring. */
 -      if (!info->nr_rings)
 -              info->nr_rings = 1;
 -
 -      info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
 -      if (!info->rinfo) {
 -              xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
 +      err = negotiate_mq(info);
 +      if (err) {
                kfree(info);
 -              return -ENOMEM;
 -      }
 -
 -      for (r_index = 0; r_index < info->nr_rings; r_index++) {
 -              struct blkfront_ring_info *rinfo;
 -
 -              rinfo = &info->rinfo[r_index];
 -              INIT_LIST_HEAD(&rinfo->indirect_pages);
 -              INIT_LIST_HEAD(&rinfo->grants);
 -              rinfo->dev_info = info;
 -              INIT_WORK(&rinfo->work, blkif_restart_queue);
 -              spin_lock_init(&rinfo->ring_lock);
 +              return err;
        }
  
        mutex_init(&info->mutex);
@@@ -2135,16 -2125,12 +2137,16 @@@ static int blkif_recover(struct blkfron
  static int blkfront_resume(struct xenbus_device *dev)
  {
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 -      int err;
 +      int err = 0;
  
        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
  
        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
  
 +      err = negotiate_mq(info);
 +      if (err)
 +              return err;
 +
        err = talk_to_blkback(dev, info);
  
        /*
diff --combined drivers/lightnvm/core.c
index 9f6acd5d1d2e9359730ad595f92d3e902bc40d17,773a55da0e20a4fa9c80475f6cb49db7f4fbdf56..0d1fb6b40c468e9bb4f455c06336972e0551f668
@@@ -250,7 -250,7 +250,7 @@@ int nvm_set_rqd_ppalist(struct nvm_dev 
                return 0;
        }
  
-       plane_cnt = (1 << dev->plane_mode);
+       plane_cnt = dev->plane_mode;
        rqd->nr_pages = plane_cnt * nr_ppas;
  
        if (dev->ops->max_phys_sect < rqd->nr_pages)
@@@ -463,11 -463,7 +463,7 @@@ static int nvm_core_init(struct nvm_de
        dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
        dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
  
-       dev->total_blocks = dev->nr_planes *
-                               dev->blks_per_lun *
-                               dev->luns_per_chnl *
-                               dev->nr_chnls;
-       dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+       dev->total_secs = dev->nr_luns * dev->sec_per_lun;
        INIT_LIST_HEAD(&dev->online_targets);
        mutex_init(&dev->mlock);
  
@@@ -572,13 -568,11 +568,13 @@@ int nvm_register(struct request_queue *
                }
        }
  
 -      ret = nvm_get_sysblock(dev, &dev->sb);
 -      if (!ret)
 -              pr_err("nvm: device not initialized.\n");
 -      else if (ret < 0)
 -              pr_err("nvm: err (%d) on device initialization\n", ret);
 +      if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
 +              ret = nvm_get_sysblock(dev, &dev->sb);
 +              if (!ret)
 +                      pr_err("nvm: device not initialized.\n");
 +              else if (ret < 0)
 +                      pr_err("nvm: err (%d) on device initialization\n", ret);
 +      }
  
        /* register device with a supported media manager */
        down_write(&nvm_lock);
@@@ -872,20 -866,19 +868,19 @@@ static int nvm_configure_by_str_event(c
  
  static int nvm_configure_get(char *buf, const struct kernel_param *kp)
  {
-       int sz = 0;
-       char *buf_start = buf;
+       int sz;
        struct nvm_dev *dev;
  
-       buf += sprintf(buf, "available devices:\n");
+       sz = sprintf(buf, "available devices:\n");
        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
-               if (sz > 4095 - DISK_NAME_LEN)
+               if (sz > 4095 - DISK_NAME_LEN - 2)
                        break;
-               buf += sprintf(buf, " %32s\n", dev->name);
+               sz += sprintf(buf + sz, " %32s\n", dev->name);
        }
        up_write(&nvm_lock);
  
-       return buf - buf_start - 1;
+       return sz;
  }
  
  static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
@@@ -1057,11 -1050,9 +1052,11 @@@ static long __nvm_ioctl_dev_init(struc
        strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
        info.fs_ppa.ppa = -1;
  
 -      ret = nvm_init_sysblock(dev, &info);
 -      if (ret)
 -              return ret;
 +      if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
 +              ret = nvm_init_sysblock(dev, &info);
 +              if (ret)
 +                      return ret;
 +      }
  
        memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
  
@@@ -1121,10 -1112,7 +1116,10 @@@ static long nvm_ioctl_dev_factory(struc
                dev->mt = NULL;
        }
  
 -      return nvm_dev_factory(dev, fact.flags);
 +      if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
 +              return nvm_dev_factory(dev, fact.flags);
 +
 +      return 0;
  }
  
  static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --combined drivers/lightnvm/rrpc.c
index 307db1ea22defbcbddcfdc3d63cfe86bda43fa0c,f015fdc9c2819105fbfee945686f2eee9d4e7ca0..82343783aa47239b427fee1bbc7e066e5160b630
@@@ -38,7 -38,7 +38,7 @@@ static void rrpc_page_invalidate(struc
  
        spin_lock(&rblk->lock);
  
-       div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+       div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
        WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
        rblk->nr_invalid_pages++;
  
@@@ -113,14 -113,24 +113,24 @@@ static void rrpc_discard(struct rrpc *r
  
  static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
-       return (rblk->next_page == rrpc->dev->pgs_per_blk);
+       return (rblk->next_page == rrpc->dev->sec_per_blk);
  }
  
+ /* Calculate relative addr for the given block, considering instantiated LUNs */
+ static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+ {
+       struct nvm_block *blk = rblk->parent;
+       int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+       return lun_blk * rrpc->dev->sec_per_blk;
+ }
+ /* Calculate global addr for the given block */
  static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
  {
        struct nvm_block *blk = rblk->parent;
  
-       return blk->id * rrpc->dev->pgs_per_blk;
+       return blk->id * rrpc->dev->sec_per_blk;
  }
  
  static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
        l.g.sec = secs;
  
        sector_div(ppa, dev->sec_per_pg);
-       div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+       div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
        l.g.pg = pgs;
  
        sector_div(ppa, dev->pgs_per_blk);
@@@ -191,12 -201,12 +201,12 @@@ static struct rrpc_block *rrpc_get_blk(
                return NULL;
        }
  
-       rblk = &rlun->blocks[blk->id];
+       rblk = rrpc_get_rblk(rlun, blk->id);
        list_add_tail(&rblk->list, &rlun->open_list);
        spin_unlock(&lun->lock);
  
        blk->priv = rblk;
-       bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+       bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
        rblk->next_page = 0;
        rblk->nr_invalid_pages = 0;
        atomic_set(&rblk->data_cmnt_size, 0);
@@@ -286,11 -296,11 +296,11 @@@ static int rrpc_move_valid_pages(struc
        struct bio *bio;
        struct page *page;
        int slot;
-       int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+       int nr_sec_per_blk = rrpc->dev->sec_per_blk;
        u64 phys_addr;
        DECLARE_COMPLETION_ONSTACK(wait);
  
-       if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+       if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
                return 0;
  
        bio = bio_alloc(GFP_NOIO, 1);
        }
  
        page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
 -      if (!page)
 +      if (!page) {
 +              bio_put(bio);
                return -ENOMEM;
 +      }
  
        while ((slot = find_first_zero_bit(rblk->invalid_pages,
-                                           nr_pgs_per_blk)) < nr_pgs_per_blk) {
+                                           nr_sec_per_blk)) < nr_sec_per_blk) {
  
                /* Lock laddr */
-               phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+               phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
  
  try:
                spin_lock(&rrpc->rev_lock);
@@@ -381,7 -389,7 +391,7 @@@ finished
        mempool_free(page, rrpc->page_pool);
        bio_put(bio);
  
-       if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+       if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
                pr_err("nvm: failed to garbage collect block\n");
                return -EIO;
        }
@@@ -499,12 -507,21 +509,21 @@@ static void rrpc_gc_queue(struct work_s
        struct rrpc *rrpc = gcb->rrpc;
        struct rrpc_block *rblk = gcb->rblk;
        struct nvm_lun *lun = rblk->parent->lun;
+       struct nvm_block *blk = rblk->parent;
        struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
  
        spin_lock(&rlun->lock);
        list_add_tail(&rblk->prio, &rlun->prio_list);
        spin_unlock(&rlun->lock);
  
+       spin_lock(&lun->lock);
+       lun->nr_open_blocks--;
+       lun->nr_closed_blocks++;
+       blk->state &= ~NVM_BLK_ST_OPEN;
+       blk->state |= NVM_BLK_ST_CLOSED;
+       list_move_tail(&rblk->list, &rlun->closed_list);
+       spin_unlock(&lun->lock);
        mempool_free(gcb, rrpc->gcb_pool);
        pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
                                                        rblk->parent->id);
@@@ -545,7 -562,7 +564,7 @@@ static struct rrpc_addr *rrpc_update_ma
        struct rrpc_addr *gp;
        struct rrpc_rev_addr *rev;
  
-       BUG_ON(laddr >= rrpc->nr_pages);
+       BUG_ON(laddr >= rrpc->nr_sects);
  
        gp = &rrpc->trans_map[laddr];
        spin_lock(&rrpc->rev_lock);
@@@ -668,20 -685,8 +687,8 @@@ static void rrpc_end_io_write(struct rr
                lun = rblk->parent->lun;
  
                cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-               if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
-                       struct nvm_block *blk = rblk->parent;
-                       struct rrpc_lun *rlun = rblk->rlun;
-                       spin_lock(&lun->lock);
-                       lun->nr_open_blocks--;
-                       lun->nr_closed_blocks++;
-                       blk->state &= ~NVM_BLK_ST_OPEN;
-                       blk->state |= NVM_BLK_ST_CLOSED;
-                       list_move_tail(&rblk->list, &rlun->closed_list);
-                       spin_unlock(&lun->lock);
+               if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
                        rrpc_run_gc(rrpc, rblk);
-               }
        }
  }
  
@@@ -726,7 -731,7 +733,7 @@@ static int rrpc_read_ppalist_rq(struct 
  
        for (i = 0; i < npages; i++) {
                /* We assume that mapping occurs at 4KB granularity */
-               BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+               BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
                gp = &rrpc->trans_map[laddr + i];
  
                if (gp->rblk) {
@@@ -757,7 -762,7 +764,7 @@@ static int rrpc_read_rq(struct rrpc *rr
        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;
  
-       BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+       BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
        gp = &rrpc->trans_map[laddr];
  
        if (gp->rblk) {
@@@ -1007,21 -1012,21 +1014,21 @@@ static int rrpc_l2p_update(u64 slba, u3
        struct nvm_dev *dev = rrpc->dev;
        struct rrpc_addr *addr = rrpc->trans_map + slba;
        struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
-       sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
        u64 elba = slba + nlb;
        u64 i;
  
-       if (unlikely(elba > dev->total_pages)) {
+       if (unlikely(elba > dev->total_secs)) {
                pr_err("nvm: L2P data from device is out of bounds!\n");
                return -EINVAL;
        }
  
        for (i = 0; i < nlb; i++) {
                u64 pba = le64_to_cpu(entries[i]);
+               unsigned int mod;
                /* LNVM treats address-spaces as silos, LBA and PBA are
                 * equally large and zero-indexed.
                 */
-               if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+               if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                        pr_err("nvm: L2P data entry is out of bounds!\n");
                        return -EINVAL;
                }
                if (!pba)
                        continue;
  
+               div_u64_rem(pba, rrpc->nr_sects, &mod);
                addr[i].addr = pba;
-               raddr[pba].addr = slba + i;
+               raddr[mod].addr = slba + i;
        }
  
        return 0;
@@@ -1046,16 -1053,16 +1055,16 @@@ static int rrpc_map_init(struct rrpc *r
        sector_t i;
        int ret;
  
-       rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+       rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
        if (!rrpc->trans_map)
                return -ENOMEM;
  
        rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
-                                                       * rrpc->nr_pages);
+                                                       * rrpc->nr_sects);
        if (!rrpc->rev_trans_map)
                return -ENOMEM;
  
-       for (i = 0; i < rrpc->nr_pages; i++) {
+       for (i = 0; i < rrpc->nr_sects; i++) {
                struct rrpc_addr *p = &rrpc->trans_map[i];
                struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
  
                return 0;
  
        /* Bring up the mapping table from device */
-       ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
-                                                       rrpc_l2p_update, rrpc);
+       ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update,
+                                                                       rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not read L2P table.\n");
                return -EINVAL;
@@@ -1141,7 -1148,7 +1150,7 @@@ static int rrpc_luns_init(struct rrpc *
        struct rrpc_lun *rlun;
        int i, j;
  
-       if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+       if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
                pr_err("rrpc: number of pages per block too high.");
                return -EINVAL;
        }
                spin_lock_init(&rlun->lock);
  
                rrpc->total_blocks += dev->blks_per_lun;
-               rrpc->nr_pages += dev->sec_per_lun;
+               rrpc->nr_sects += dev->sec_per_lun;
  
                rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
                                                rrpc->dev->blks_per_lun);
@@@ -1221,9 -1228,9 +1230,9 @@@ static sector_t rrpc_capacity(void *pri
  
        /* cur, gc, and two emergency blocks for each lun */
        reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
-       provisioned = rrpc->nr_pages - reserved;
+       provisioned = rrpc->nr_sects - reserved;
  
-       if (reserved > rrpc->nr_pages) {
+       if (reserved > rrpc->nr_sects) {
                pr_err("rrpc: not enough space available to expose storage.\n");
                return 0;
        }
@@@ -1242,10 -1249,11 +1251,11 @@@ static void rrpc_block_map_update(struc
        struct nvm_dev *dev = rrpc->dev;
        int offset;
        struct rrpc_addr *laddr;
-       u64 paddr, pladdr;
+       u64 bpaddr, paddr, pladdr;
  
-       for (offset = 0; offset < dev->pgs_per_blk; offset++) {
-               paddr = block_to_addr(rrpc, rblk) + offset;
+       bpaddr = block_to_rel_addr(rrpc, rblk);
+       for (offset = 0; offset < dev->sec_per_blk; offset++) {
+               paddr = bpaddr + offset;
  
                pladdr = rrpc->rev_trans_map[paddr].addr;
                if (pladdr == ADDR_EMPTY)
@@@ -1386,7 -1394,7 +1396,7 @@@ static void *rrpc_init(struct nvm_dev *
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
  
        pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
-                       rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+                       rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
  
        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
  
diff --combined drivers/lightnvm/rrpc.h
index f7b37336353fd56771f776ed420ff07521f2b6d5,0577c4dae05ff05e15879bed5293bcc663a54757..855f4a5ca7ddeb75fbc618b1038312865b28cb97
@@@ -104,7 -104,7 +104,7 @@@ struct rrpc 
        struct rrpc_lun *luns;
  
        /* calculated values */
-       unsigned long long nr_pages;
+       unsigned long long nr_sects;
        unsigned long total_blocks;
  
        /* Write strategy variables. Move these into each for structure for each
@@@ -156,6 -156,15 +156,15 @@@ struct rrpc_rev_addr 
        u64 addr;
  };
  
+ static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
+                                                               int blk_id)
+ {
+       struct rrpc *rrpc = rlun->rrpc;
+       int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+       return &rlun->blocks[lun_blk];
+ }
  static inline sector_t rrpc_get_laddr(struct bio *bio)
  {
        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
@@@ -174,7 -183,8 +183,7 @@@ static inline sector_t rrpc_get_sector(
  static inline int request_intersects(struct rrpc_inflight_rq *r,
                                sector_t laddr_start, sector_t laddr_end)
  {
 -      return (laddr_end >= r->l_start && laddr_end <= r->l_end) &&
 -              (laddr_start >= r->l_start && laddr_start <= r->l_end);
 +      return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
  }
  
  static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
        sector_t laddr_end = laddr + pages - 1;
        struct rrpc_inflight_rq *rtmp;
  
 +      WARN_ON(irqs_disabled());
 +
        spin_lock_irq(&rrpc->inflights.lock);
        list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
                if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
@@@ -206,7 -214,7 +215,7 @@@ static inline int rrpc_lock_laddr(struc
                                 unsigned pages,
                                 struct rrpc_inflight_rq *r)
  {
-       BUG_ON((laddr + pages) > rrpc->nr_pages);
+       BUG_ON((laddr + pages) > rrpc->nr_sects);
  
        return __rrpc_lock_laddr(rrpc, laddr, pages, r);
  }
@@@ -243,7 -251,7 +252,7 @@@ static inline void rrpc_unlock_rq(struc
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
        uint8_t pages = rqd->nr_pages;
  
-       BUG_ON((r->l_start + pages) > rrpc->nr_pages);
+       BUG_ON((r->l_start + pages) > rrpc->nr_sects);
  
        rrpc_unlock_laddr(rrpc, r);
  }
index b586d84f251881801ae75023fd3fd6e6bd884047,2ed30f063a132973beb6f21b15f30b86da1763a7..c894841c6456401dccdbcbff66fb72052ed9dd79
@@@ -1,6 -1,10 +1,10 @@@
+ config NVME_CORE
+       tristate
  config BLK_DEV_NVME
        tristate "NVM Express block device"
        depends on PCI && BLOCK
+       select NVME_CORE
        ---help---
          The NVM Express driver is for solid state drives directly
          connected to the PCI or PCI Express bus.  If you know you
  
  config BLK_DEV_NVME_SCSI
        bool "SCSI emulation for NVMe device nodes"
-       depends on BLK_DEV_NVME
+       depends on NVME_CORE
        ---help---
          This adds support for the SG_IO ioctl on the NVMe character
          and block devices nodes, as well a a translation for a small
          number of selected SCSI commands to NVMe commands to the NVMe
          driver.  If you don't know what this means you probably want
 -        to say N here, and if you know what it means you probably
 -        want to say N as well.
 +        to say N here, unless you run a distro that abuses the SCSI
 +        emulation to provide stable device names for mount by id, like
 +        some OpenSuSE and SLES versions.
diff --combined drivers/nvme/host/core.c
index 03c46412fff4d602477239d0672cbdeb54c13dfb,266918b9bb84ebb1a5348fa5ad97d81886eda585..643f457131c24f5c16efa3a2226803b587affd05
  
  #define NVME_MINORS           (1U << MINORBITS)
  
+ unsigned char admin_timeout = 60;
+ module_param(admin_timeout, byte, 0644);
+ MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
+ EXPORT_SYMBOL_GPL(admin_timeout);
+ unsigned char nvme_io_timeout = 30;
+ module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
+ MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+ EXPORT_SYMBOL_GPL(nvme_io_timeout);
+ unsigned char shutdown_timeout = 5;
+ module_param(shutdown_timeout, byte, 0644);
+ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
  static int nvme_major;
  module_param(nvme_major, int, 0);
  
@@@ -40,7 -54,7 +54,7 @@@ static int nvme_char_major
  module_param(nvme_char_major, int, 0);
  
  static LIST_HEAD(nvme_ctrl_list);
- DEFINE_SPINLOCK(dev_list_lock);
static DEFINE_SPINLOCK(dev_list_lock);
  
  static struct class *nvme_class;
  
@@@ -55,9 -69,8 +69,9 @@@ static void nvme_free_ns(struct kref *k
        ns->disk->private_data = NULL;
        spin_unlock(&dev_list_lock);
  
 -      nvme_put_ctrl(ns->ctrl);
        put_disk(ns->disk);
 +      ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
 +      nvme_put_ctrl(ns->ctrl);
        kfree(ns);
  }
  
@@@ -72,11 -85,21 +86,21 @@@ static struct nvme_ns *nvme_get_ns_from
  
        spin_lock(&dev_list_lock);
        ns = disk->private_data;
-       if (ns && !kref_get_unless_zero(&ns->kref))
-               ns = NULL;
+       if (ns) {
+               if (!kref_get_unless_zero(&ns->kref))
+                       goto fail;
+               if (!try_module_get(ns->ctrl->ops->module))
+                       goto fail_put_ns;
+       }
        spin_unlock(&dev_list_lock);
  
        return ns;
+ fail_put_ns:
+       kref_put(&ns->kref, nvme_free_ns);
+ fail:
+       spin_unlock(&dev_list_lock);
+       return NULL;
  }
  
  void nvme_requeue_req(struct request *req)
                blk_mq_kick_requeue_list(req->q);
        spin_unlock_irqrestore(req->q->queue_lock, flags);
  }
+ EXPORT_SYMBOL_GPL(nvme_requeue_req);
  
  struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags)
  
        req->cmd = (unsigned char *)cmd;
        req->cmd_len = sizeof(struct nvme_command);
-       req->special = (void *)0;
  
        return req;
  }
+ EXPORT_SYMBOL_GPL(nvme_alloc_request);
  
  /*
   * Returns 0 on success.  If the result is negative, it's a Linux error code;
   * if the result is positive, it's an NVM Express status code
   */
  int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
+               struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+               unsigned timeout)
  {
        struct request *req;
        int ret;
                return PTR_ERR(req);
  
        req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+       req->special = cqe;
  
        if (buffer && bufflen) {
                ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
        }
  
        blk_execute_rq(req->q, NULL, req, 0);
-       if (result)
-               *result = (u32)(uintptr_t)req->special;
        ret = req->errors;
   out:
        blk_mq_free_request(req);
  int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen)
  {
-       return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
+       return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
  }
+ EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
  
  int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen,
                u32 *result, unsigned timeout)
  {
        bool write = cmd->common.opcode & 1;
+       struct nvme_completion cqe;
        struct nvme_ns *ns = q->queuedata;
        struct gendisk *disk = ns ? ns->disk : NULL;
        struct request *req;
                return PTR_ERR(req);
  
        req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+       req->special = &cqe;
  
        if (ubuffer && bufflen) {
                ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
                        goto out_unmap;
                }
  
 -              if (meta_buffer) {
 +              if (meta_buffer && meta_len) {
                        struct bio_integrity_payload *bip;
  
                        meta = kmalloc(meta_len, GFP_KERNEL);
        blk_execute_rq(req->q, disk, req, 0);
        ret = req->errors;
        if (result)
-               *result = (u32)(uintptr_t)req->special;
+               *result = le32_to_cpu(cqe.result);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
@@@ -303,6 -330,8 +331,8 @@@ int nvme_get_features(struct nvme_ctrl 
                                        dma_addr_t dma_addr, u32 *result)
  {
        struct nvme_command c;
+       struct nvme_completion cqe;
+       int ret;
  
        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_get_features;
        c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(fid);
  
-       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+       ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+       if (ret >= 0)
+               *result = le32_to_cpu(cqe.result);
+       return ret;
  }
  
  int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                                        dma_addr_t dma_addr, u32 *result)
  {
        struct nvme_command c;
+       struct nvme_completion cqe;
+       int ret;
  
        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_set_features;
        c.features.fid = cpu_to_le32(fid);
        c.features.dword11 = cpu_to_le32(dword11);
  
-       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+       ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+       if (ret >= 0)
+               *result = le32_to_cpu(cqe.result);
+       return ret;
  }
  
  int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
@@@ -364,6 -401,7 +402,7 @@@ int nvme_set_queue_count(struct nvme_ct
        *count = min(*count, nr_io_queues);
        return 0;
  }
+ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
  
  static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
  {
  
        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
 +      if (io.flags)
 +              return -EINVAL;
  
        switch (io.opcode) {
        case nvme_cmd_write:
@@@ -427,8 -463,6 +466,8 @@@ static int nvme_user_cmd(struct nvme_ct
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
 +      if (cmd.flags)
 +              return -EINVAL;
  
        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
@@@ -504,7 -538,10 +543,10 @@@ static int nvme_open(struct block_devic
  
  static void nvme_release(struct gendisk *disk, fmode_t mode)
  {
-       nvme_put_ns(disk->private_data);
+       struct nvme_ns *ns = disk->private_data;
+       module_put(ns->ctrl->ops->module);
+       nvme_put_ns(ns);
  }
  
  static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@@ -545,8 -582,14 +587,14 @@@ static void nvme_init_integrity(struct 
  
  static void nvme_config_discard(struct nvme_ns *ns)
  {
+       struct nvme_ctrl *ctrl = ns->ctrl;
        u32 logical_block_size = queue_logical_block_size(ns->queue);
-       ns->queue->limits.discard_zeroes_data = 0;
+       if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
+               ns->queue->limits.discard_zeroes_data = 1;
+       else
+               ns->queue->limits.discard_zeroes_data = 0;
        ns->queue->limits.discard_alignment = logical_block_size;
        ns->queue->limits.discard_granularity = logical_block_size;
        blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
@@@ -561,13 -604,9 +609,13 @@@ static int nvme_revalidate_disk(struct 
        u16 old_ms;
        unsigned short bs;
  
 +      if (test_bit(NVME_NS_DEAD, &ns->flags)) {
 +              set_capacity(disk, 0);
 +              return -ENODEV;
 +      }
        if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
-               dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
-                               __func__, ns->ctrl->instance, ns->ns_id);
+               dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+                               __func__);
                return -ENODEV;
        }
        if (id->ncap == 0) {
  
        if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
                if (nvme_nvm_register(ns->queue, disk->disk_name)) {
-                       dev_warn(ns->ctrl->dev,
+                       dev_warn(disk_to_dev(ns->disk),
                                "%s: LightNVM init failure\n", __func__);
                        kfree(id);
                        return -ENODEV;
@@@ -750,7 -789,7 +798,7 @@@ static int nvme_wait_ready(struct nvme_
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
-                       dev_err(ctrl->dev,
+                       dev_err(ctrl->device,
                                "Device not ready; aborting %s\n", enabled ?
                                                "initialisation" : "reset");
                        return -ENODEV;
@@@ -778,6 -817,7 +826,7 @@@ int nvme_disable_ctrl(struct nvme_ctrl 
                return ret;
        return nvme_wait_ready(ctrl, cap, false);
  }
+ EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
  
  int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
  {
        int ret;
  
        if (page_shift < dev_page_min) {
-               dev_err(ctrl->dev,
+               dev_err(ctrl->device,
                        "Minimum device page size %u too large for host (%u)\n",
                        1 << dev_page_min, 1 << page_shift);
                return -ENODEV;
                return ret;
        return nvme_wait_ready(ctrl, cap, true);
  }
+ EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
  
  int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
  {
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
-                       dev_err(ctrl->dev,
+                       dev_err(ctrl->device,
                                "Device shutdown incomplete; abort shutdown\n");
                        return -ENODEV;
                }
  
        return ret;
  }
+ EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
  
 +static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 +              struct request_queue *q)
 +{
 +      if (ctrl->max_hw_sectors) {
 +              u32 max_segments =
 +                      (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 +
 +              blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 +              blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 +      }
 +      if (ctrl->stripe_size)
 +              blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
 +      if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 +              blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 +      blk_queue_virt_boundary(q, ctrl->page_size - 1);
 +}
 +
  /*
   * Initialize the cached copies of the Identify data and various controller
   * register in our nvme_ctrl structure.  This should be called as soon as
@@@ -870,13 -895,13 +921,13 @@@ int nvme_init_identify(struct nvme_ctr
  
        ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
        if (ret) {
-               dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+               dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
                return ret;
        }
  
        ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
        if (ret) {
-               dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+               dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
                return ret;
        }
        page_shift = NVME_CAP_MPSMIN(cap) + 12;
  
        ret = nvme_identify_ctrl(ctrl, &id);
        if (ret) {
-               dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+               dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
                return -EIO;
        }
  
+       ctrl->vid = le16_to_cpu(id->vid);
        ctrl->oncs = le16_to_cpup(&id->oncs);
        atomic_set(&ctrl->abort_limit, id->acl + 1);
        ctrl->vwc = id->vwc;
+       ctrl->cntlid = le16_to_cpup(&id->cntlid);
        memcpy(ctrl->serial, id->sn, sizeof(id->sn));
        memcpy(ctrl->model, id->mn, sizeof(id->mn));
        memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
                }
        }
  
 +      nvme_set_queue_limits(ctrl, ctrl->admin_q);
 +
        kfree(id);
        return 0;
  }
+ EXPORT_SYMBOL_GPL(nvme_init_identify);
  
  static int nvme_dev_open(struct inode *inode, struct file *file)
  {
@@@ -965,13 -991,13 +1019,13 @@@ static int nvme_dev_user_cmd(struct nvm
  
        ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
        if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
-               dev_warn(ctrl->dev,
+               dev_warn(ctrl->device,
                        "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
                ret = -EINVAL;
                goto out_unlock;
        }
  
-       dev_warn(ctrl->dev,
+       dev_warn(ctrl->device,
                "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
        kref_get(&ns->kref);
        mutex_unlock(&ctrl->namespaces_mutex);
@@@ -997,7 -1023,7 +1051,7 @@@ static long nvme_dev_ioctl(struct file 
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
-               dev_warn(ctrl->dev, "resetting controller\n");
+               dev_warn(ctrl->device, "resetting controller\n");
                return ctrl->ops->reset_ctrl(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
@@@ -1028,6 -1054,30 +1082,30 @@@ static ssize_t nvme_sysfs_reset(struct 
  }
  static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
  
+ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
+                                                               char *buf)
+ {
+       struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+       struct nvme_ctrl *ctrl = ns->ctrl;
+       int serial_len = sizeof(ctrl->serial);
+       int model_len = sizeof(ctrl->model);
+       if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+               return sprintf(buf, "eui.%16phN\n", ns->uuid);
+       if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+               return sprintf(buf, "eui.%8phN\n", ns->eui);
+       while (ctrl->serial[serial_len - 1] == ' ')
+               serial_len--;
+       while (ctrl->model[model_len - 1] == ' ')
+               model_len--;
+       return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
+               serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
+ }
+ static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
  static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
                                                                char *buf)
  {
@@@ -1053,6 -1103,7 +1131,7 @@@ static ssize_t nsid_show(struct device 
  static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
  
  static struct attribute *nvme_ns_attrs[] = {
+       &dev_attr_wwid.attr,
        &dev_attr_uuid.attr,
        &dev_attr_eui.attr,
        &dev_attr_nsid.attr,
@@@ -1081,7 -1132,7 +1160,7 @@@ static const struct attribute_group nvm
        .is_visible     = nvme_attrs_are_visible,
  };
  
- #define nvme_show_function(field)                                             \
+ #define nvme_show_str_function(field)                                         \
  static ssize_t  field##_show(struct device *dev,                              \
                            struct device_attribute *attr, char *buf)           \
  {                                                                             \
  }                                                                             \
  static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
  
- nvme_show_function(model);
- nvme_show_function(serial);
- nvme_show_function(firmware_rev);
+ #define nvme_show_int_function(field)                                         \
+ static ssize_t  field##_show(struct device *dev,                              \
+                           struct device_attribute *attr, char *buf)           \
+ {                                                                             \
+         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);                                \
+         return sprintf(buf, "%d\n", ctrl->field);     \
+ }                                                                             \
+ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+ nvme_show_str_function(model);
+ nvme_show_str_function(serial);
+ nvme_show_str_function(firmware_rev);
+ nvme_show_int_function(cntlid);
  
  static struct attribute *nvme_dev_attrs[] = {
        &dev_attr_reset_controller.attr,
        &dev_attr_model.attr,
        &dev_attr_serial.attr,
        &dev_attr_firmware_rev.attr,
+       &dev_attr_cntlid.attr,
        NULL
  };
  
@@@ -1146,13 -1208,10 +1236,13 @@@ static void nvme_alloc_ns(struct nvme_c
        if (!ns)
                return;
  
 +      ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
 +      if (ns->instance < 0)
 +              goto out_free_ns;
 +
        ns->queue = blk_mq_init_queue(ctrl->tagset);
        if (IS_ERR(ns->queue))
 -              goto out_free_ns;
 -      queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 +              goto out_release_instance;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
        ns->queue->queuedata = ns;
        ns->ctrl = ctrl;
        ns->disk = disk;
        ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
  
 +
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 -      if (ctrl->max_hw_sectors) {
 -              blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
 -              blk_queue_max_segments(ns->queue,
 -                      (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
 -      }
 -      if (ctrl->stripe_size)
 -              blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
 -      if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 -              blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
 -      blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
 +      nvme_set_queue_limits(ctrl, ns->queue);
  
        disk->major = nvme_major;
        disk->first_minor = 0;
        disk->queue = ns->queue;
        disk->driverfs_dev = ctrl->device;
        disk->flags = GENHD_FL_EXT_DEVT;
 -      sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid);
 +      sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
  
        if (nvme_revalidate_disk(ns->disk))
                goto out_free_disk;
        kfree(disk);
   out_free_queue:
        blk_cleanup_queue(ns->queue);
 + out_release_instance:
 +      ida_simple_remove(&ctrl->ns_ida, ns->instance);
   out_free_ns:
        kfree(ns);
  }
  
  static void nvme_ns_remove(struct nvme_ns *ns)
  {
 -      bool kill = nvme_io_incapable(ns->ctrl) &&
 -                      !blk_queue_dying(ns->queue);
 -
 -      lockdep_assert_held(&ns->ctrl->namespaces_mutex);
 -
 -      if (kill) {
 -              blk_set_queue_dying(ns->queue);
 +      if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 +              return;
  
 -              /*
 -               * The controller was shutdown first if we got here through
 -               * device removal. The shutdown may requeue outstanding
 -               * requests. These need to be aborted immediately so
 -               * del_gendisk doesn't block indefinitely for their completion.
 -               */
 -              blk_mq_abort_requeue_list(ns->queue);
 -      }
        if (ns->disk->flags & GENHD_FL_UP) {
                if (blk_get_integrity(ns->disk))
                        blk_integrity_unregister(ns->disk);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_attr_group);
                del_gendisk(ns->disk);
 -      }
 -      if (kill || !blk_queue_dying(ns->queue)) {
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
        }
 +      mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
 +      mutex_unlock(&ns->ctrl->namespaces_mutex);
        nvme_put_ns(ns);
  }
  
@@@ -1308,14 -1386,18 +1398,16 @@@ void nvme_scan_namespaces(struct nvme_c
        mutex_unlock(&ctrl->namespaces_mutex);
        kfree(id);
  }
+ EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
  
  void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
  {
        struct nvme_ns *ns, *next;
  
 -      mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
 -      mutex_unlock(&ctrl->namespaces_mutex);
  }
+ EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
  
  static DEFINE_IDA(nvme_instance_ida);
  
@@@ -1347,13 -1429,14 +1439,14 @@@ static void nvme_release_instance(struc
  }
  
  void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
 {
+ {
        device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
  
        spin_lock(&dev_list_lock);
        list_del(&ctrl->node);
        spin_unlock(&dev_list_lock);
  }
+ EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
  
  static void nvme_free_ctrl(struct kref *kref)
  {
  
        put_device(ctrl->device);
        nvme_release_instance(ctrl);
 +      ida_destroy(&ctrl->ns_ida);
  
        ctrl->ops->free_ctrl(ctrl);
  }
@@@ -1370,6 -1452,7 +1463,7 @@@ void nvme_put_ctrl(struct nvme_ctrl *ct
  {
        kref_put(&ctrl->kref, nvme_free_ctrl);
  }
+ EXPORT_SYMBOL_GPL(nvme_put_ctrl);
  
  /*
   * Initialize a NVMe controller structures.  This needs to be called during
@@@ -1394,15 -1477,13 +1488,14 @@@ int nvme_init_ctrl(struct nvme_ctrl *ct
  
        ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
                                MKDEV(nvme_char_major, ctrl->instance),
-                               dev, nvme_dev_attr_groups,
+                               ctrl, nvme_dev_attr_groups,
                                "nvme%d", ctrl->instance);
        if (IS_ERR(ctrl->device)) {
                ret = PTR_ERR(ctrl->device);
                goto out_release_instance;
        }
        get_device(ctrl->device);
-       dev_set_drvdata(ctrl->device, ctrl);
 +      ida_init(&ctrl->ns_ida);
  
        spin_lock(&dev_list_lock);
        list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@@ -1414,39 -1495,8 +1507,41 @@@ out_release_instance
  out:
        return ret;
  }
+ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
  
 +/**
 + * nvme_kill_queues(): Ends all namespace queues
 + * @ctrl: the dead controller that needs to end
 + *
 + * Call this function when the driver determines it is unable to get the
 + * controller in a state capable of servicing IO.
 + */
 +void nvme_kill_queues(struct nvme_ctrl *ctrl)
 +{
 +      struct nvme_ns *ns;
 +
 +      mutex_lock(&ctrl->namespaces_mutex);
 +      list_for_each_entry(ns, &ctrl->namespaces, list) {
 +              if (!kref_get_unless_zero(&ns->kref))
 +                      continue;
 +
 +              /*
 +               * Revalidating a dead namespace sets capacity to 0. This will
 +               * end buffered writers dirtying pages that can't be synced.
 +               */
 +              if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
 +                      revalidate_disk(ns->disk);
 +
 +              blk_set_queue_dying(ns->queue);
 +              blk_mq_abort_requeue_list(ns->queue);
 +              blk_mq_start_stopped_hw_queues(ns->queue, true);
 +
 +              nvme_put_ns(ns);
 +      }
 +      mutex_unlock(&ctrl->namespaces_mutex);
 +}
++EXPORT_SYMBOL_GPL(nvme_kill_queues);
 +
  void nvme_stop_queues(struct nvme_ctrl *ctrl)
  {
        struct nvme_ns *ns;
        }
        mutex_unlock(&ctrl->namespaces_mutex);
  }
+ EXPORT_SYMBOL_GPL(nvme_stop_queues);
  
  void nvme_start_queues(struct nvme_ctrl *ctrl)
  {
        }
        mutex_unlock(&ctrl->namespaces_mutex);
  }
+ EXPORT_SYMBOL_GPL(nvme_start_queues);
  
  int __init nvme_core_init(void)
  {
@@@ -1514,3 -1566,8 +1611,8 @@@ void nvme_core_exit(void
        class_destroy(nvme_class);
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
  }
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION("1.0");
+ module_init(nvme_core_init);
+ module_exit(nvme_core_exit);
index 6bb15e4926dc86ed8b30e5cc2c504dc29090d545,d4f81f07f29640ab130a22caac311fe4fceca2ac..42a01a93198944717f2f0266aaee7570d0c52467
@@@ -146,10 -146,9 +146,10 @@@ struct nvme_nvm_command 
        };
  };
  
 +#define NVME_NVM_LP_MLC_PAIRS 886
  struct nvme_nvm_lp_mlc {
        __u16                   num_pairs;
 -      __u8                    pairs[886];
 +      __u8                    pairs[NVME_NVM_LP_MLC_PAIRS];
  };
  
  struct nvme_nvm_lp_tbl {
@@@ -283,14 -282,9 +283,14 @@@ static int init_grps(struct nvm_id *nvm
                        memcpy(dst->lptbl.id, src->lptbl.id, 8);
                        dst->lptbl.mlc.num_pairs =
                                        le16_to_cpu(src->lptbl.mlc.num_pairs);
 -                      /* 4 bits per pair */
 +
 +                      if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
 +                              pr_err("nvm: number of MLC pairs not supported\n");
 +                              return -EINVAL;
 +                      }
 +
                        memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
 -                                              dst->lptbl.mlc.num_pairs >> 1);
 +                                              dst->lptbl.mlc.num_pairs);
                }
        }
  
@@@ -379,8 -373,31 +379,31 @@@ out
        return ret;
  }
  
+ static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
+                                               int nr_dst_blks, u8 *dst_blks,
+                                               int nr_src_blks, u8 *src_blks)
+ {
+       int blk, offset, pl, blktype;
+       for (blk = 0; blk < nr_dst_blks; blk++) {
+               offset = blk * nvmdev->plane_mode;
+               blktype = src_blks[offset];
+               /* Bad blocks on any planes take precedence over other types */
+               for (pl = 0; pl < nvmdev->plane_mode; pl++) {
+                       if (src_blks[offset + pl] &
+                                       (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+                               blktype = src_blks[offset + pl];
+                               break;
+                       }
+               }
+               dst_blks[blk] = blktype;
+       }
+ }
  static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
-                               int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+                               int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
                                void *priv)
  {
        struct request_queue *q = nvmdev->q;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
-       int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
+       u8 *dst_blks = NULL;
+       int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
+       int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
        int ret = 0;
  
        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        if (!bb_tbl)
                return -ENOMEM;
  
+       dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
+       if (!dst_blks) {
+               ret = -ENOMEM;
+               goto out;
+       }
        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                goto out;
        }
  
-       if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+       if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
                ret = -EINVAL;
                dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
-                                       le32_to_cpu(bb_tbl->tblks), nr_blocks);
+                               le32_to_cpu(bb_tbl->tblks), nr_src_blks);
                goto out;
        }
  
+       nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
+                                               nr_src_blks, bb_tbl->blk);
        ppa = dev_to_generic_addr(nvmdev, ppa);
-       ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
+       ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
  out:
+       kfree(dst_blks);
        kfree(bb_tbl);
        return ret;
  }
diff --combined drivers/nvme/host/nvme.h
index fb15ba5f5d19f5650fd2cb76643b2217b06891b0,bf3f143e975b500aac48e5e67b31e29cff314e4c..f846da4eb3380fb743fa449010e1e12e2e3e2a7d
@@@ -59,6 -59,12 +59,12 @@@ enum nvme_quirks 
         * correctly.
         */
        NVME_QUIRK_IDENTIFY_CNS                 = (1 << 1),
+       /*
+        * The controller deterministically returns 0's on reads to discarded
+        * logical blocks.
+        */
+       NVME_QUIRK_DISCARD_ZEROES               = (1 << 2),
  };
  
  struct nvme_ctrl {
        struct mutex namespaces_mutex;
        struct device *device;  /* char device */
        struct list_head node;
 +      struct ida ns_ida;
  
        char name[12];
        char serial[20];
        char model[40];
        char firmware_rev[8];
+       int cntlid;
  
        u32 ctrl_config;
  
@@@ -85,6 -91,7 +92,7 @@@
        u32 max_hw_sectors;
        u32 stripe_size;
        u16 oncs;
+       u16 vid;
        atomic_t abort_limit;
        u8 event_limit;
        u8 vwc;
@@@ -103,7 -110,6 +111,7 @@@ struct nvme_ns 
        struct request_queue *queue;
        struct gendisk *disk;
        struct kref kref;
 +      int instance;
  
        u8 eui[8];
        u8 uuid[16];
        bool ext;
        u8 pi_type;
        int type;
 +      unsigned long flags;
 +
 +#define NVME_NS_REMOVING 0
 +#define NVME_NS_DEAD     1
 +
        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
  };
  
  struct nvme_ctrl_ops {
+       struct module *module;
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@@ -146,9 -148,9 +155,9 @@@ static inline bool nvme_io_incapable(st
        u32 val = 0;
  
        if (ctrl->ops->io_incapable(ctrl))
 -              return false;
 +              return true;
        if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
 -              return false;
 +              return true;
        return val & NVME_CSTS_CFS;
  }
  
@@@ -247,7 -249,6 +256,7 @@@ void nvme_remove_namespaces(struct nvme
  
  void nvme_stop_queues(struct nvme_ctrl *ctrl);
  void nvme_start_queues(struct nvme_ctrl *ctrl);
 +void nvme_kill_queues(struct nvme_ctrl *ctrl);
  
  struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags);
@@@ -255,7 -256,8 +264,8 @@@ void nvme_requeue_req(struct request *r
  int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
  int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, unsigned bufflen,  u32 *result, unsigned timeout);
+               struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+               unsigned timeout);
  int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen, u32 *result,
                unsigned timeout);
@@@ -273,8 -275,6 +283,6 @@@ int nvme_set_features(struct nvme_ctrl 
                        dma_addr_t dma_addr, u32 *result);
  int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
  
- extern spinlock_t dev_list_lock;
  struct sg_io_hdr;
  
  int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
diff --combined drivers/nvme/host/pci.c
index 680f5780750cffa45efc93101fb60eb744fdd04c,e9f18e1d73e59fb502e2116ac2e27f46e10e73c4..f8db70ae172d9f558bd14916ca9f05d461232a17
@@@ -27,7 -27,6 +27,6 @@@
  #include <linux/interrupt.h>
  #include <linux/io.h>
  #include <linux/kdev_t.h>
- #include <linux/kthread.h>
  #include <linux/kernel.h>
  #include <linux/mm.h>
  #include <linux/module.h>
@@@ -39,6 -38,7 +38,7 @@@
  #include <linux/sched.h>
  #include <linux/slab.h>
  #include <linux/t10-pi.h>
+ #include <linux/timer.h>
  #include <linux/types.h>
  #include <linux/io-64-nonatomic-lo-hi.h>
  #include <asm/unaligned.h>
  #define NVME_NR_AEN_COMMANDS  1
  #define NVME_AQ_BLKMQ_DEPTH   (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
  
- unsigned char admin_timeout = 60;
- module_param(admin_timeout, byte, 0644);
- MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
- unsigned char nvme_io_timeout = 30;
- module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
- MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
- unsigned char shutdown_timeout = 5;
- module_param(shutdown_timeout, byte, 0644);
- MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
  static int use_threaded_interrupts;
  module_param(use_threaded_interrupts, int, 0);
  
@@@ -76,23 -64,20 +64,19 @@@ static bool use_cmb_sqes = true
  module_param(use_cmb_sqes, bool, 0644);
  MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
  
- static LIST_HEAD(dev_list);
- static struct task_struct *nvme_thread;
  static struct workqueue_struct *nvme_workq;
- static wait_queue_head_t nvme_kthread_wait;
  
  struct nvme_dev;
  struct nvme_queue;
  
  static int nvme_reset(struct nvme_dev *dev);
  static void nvme_process_cq(struct nvme_queue *nvmeq);
 -static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
  static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
  
  /*
   * Represents an NVM Express device.  Each nvme_dev is a PCI function.
   */
  struct nvme_dev {
-       struct list_head node;
        struct nvme_queue **queues;
        struct blk_mq_tag_set tagset;
        struct blk_mq_tag_set admin_tagset;
        struct work_struct reset_work;
        struct work_struct scan_work;
        struct work_struct remove_work;
+       struct work_struct async_work;
+       struct timer_list watchdog_timer;
        struct mutex shutdown_lock;
        bool subsystem;
        void __iomem *cmb;
        unsigned long flags;
  
  #define NVME_CTRL_RESETTING    0
 +#define NVME_CTRL_REMOVING     1
  
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
@@@ -148,7 -134,6 +134,6 @@@ struct nvme_queue 
        u32 __iomem *q_db;
        u16 q_depth;
        s16 cq_vector;
-       u16 sq_head;
        u16 sq_tail;
        u16 cq_head;
        u16 qid;
@@@ -286,34 -271,26 +271,37 @@@ static int nvme_init_request(void *data
        return 0;
  }
  
 +static void nvme_queue_scan(struct nvme_dev *dev)
 +{
 +      /*
 +       * Do not queue new scan work when a controller is reset during
 +       * removal.
 +       */
 +      if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
 +              return;
 +      queue_work(nvme_workq, &dev->scan_work);
 +}
 +
  static void nvme_complete_async_event(struct nvme_dev *dev,
                struct nvme_completion *cqe)
  {
        u16 status = le16_to_cpu(cqe->status) >> 1;
        u32 result = le32_to_cpu(cqe->result);
  
-       if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
+       if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
                ++dev->ctrl.event_limit;
+               queue_work(nvme_workq, &dev->async_work);
+       }
        if (status != NVME_SC_SUCCESS)
                return;
  
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
-               dev_info(dev->dev, "rescanning\n");
+               dev_info(dev->ctrl.device, "rescanning\n");
 -              queue_work(nvme_workq, &dev->scan_work);
 +              nvme_queue_scan(dev);
        default:
-               dev_warn(dev->dev, "async event result %08x\n", result);
+               dev_warn(dev->ctrl.device, "async event result %08x\n", result);
        }
  }
  
@@@ -689,14 -666,6 +677,14 @@@ static int nvme_queue_rq(struct blk_mq_
        blk_mq_start_request(req);
  
        spin_lock_irq(&nvmeq->q_lock);
 +      if (unlikely(nvmeq->cq_vector < 0)) {
 +              if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
 +                      ret = BLK_MQ_RQ_QUEUE_BUSY;
 +              else
 +                      ret = BLK_MQ_RQ_QUEUE_ERROR;
 +              spin_unlock_irq(&nvmeq->q_lock);
 +              goto out;
 +      }
        __nvme_submit_cmd(nvmeq, &cmnd);
        nvme_process_cq(nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
@@@ -727,7 -696,7 +715,7 @@@ static void nvme_complete_rq(struct req
        }
  
        if (unlikely(iod->aborted)) {
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                        "completing aborted command with status: %04x\n",
                        req->errors);
        }
@@@ -749,7 -718,6 +737,6 @@@ static void __nvme_process_cq(struct nv
  
                if ((status & 1) != phase)
                        break;
-               nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
                if (++head == nvmeq->q_depth) {
                        head = 0;
                        phase = !phase;
                        *tag = -1;
  
                if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
-                       dev_warn(nvmeq->q_dmadev,
+                       dev_warn(nvmeq->dev->ctrl.device,
                                "invalid id %d completed on queue %d\n",
                                cqe.command_id, le16_to_cpu(cqe.sq_id));
                        continue;
                }
  
                req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-               if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-                       u32 result = le32_to_cpu(cqe.result);
-                       req->special = (void *)(uintptr_t)result;
-               }
+               if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+                       memcpy(req->special, &cqe, sizeof(cqe));
                blk_mq_complete_request(req, status >> 1);
  
        }
@@@ -846,15 -812,22 +831,22 @@@ static int nvme_poll(struct blk_mq_hw_c
        return 0;
  }
  
- static void nvme_submit_async_event(struct nvme_dev *dev)
+ static void nvme_async_event_work(struct work_struct *work)
  {
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+       struct nvme_queue *nvmeq = dev->queues[0];
        struct nvme_command c;
  
        memset(&c, 0, sizeof(c));
        c.common.opcode = nvme_admin_async_event;
-       c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;
  
-       __nvme_submit_cmd(dev->queues[0], &c);
+       spin_lock_irq(&nvmeq->q_lock);
+       while (dev->ctrl.event_limit > 0) {
+               c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
+                       --dev->ctrl.event_limit;
+               __nvme_submit_cmd(nvmeq, &c);
+       }
+       spin_unlock_irq(&nvmeq->q_lock);
  }
  
  static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@@ -924,12 -897,10 +916,10 @@@ static void abort_endio(struct request 
  {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = iod->nvmeq;
-       u32 result = (u32)(uintptr_t)req->special;
        u16 status = req->errors;
  
-       dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+       dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
        blk_mq_free_request(req);
  }
  
@@@ -948,7 -919,7 +938,7 @@@ static enum blk_eh_timer_return nvme_ti
         * shutdown, so we return BLK_EH_HANDLED.
         */
        if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, disable controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
         * returned to the driver, or if this is the admin queue.
         */
        if (!nvmeq->qid || iod->aborted) {
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
        cmd.abort.cid = req->tag;
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
  
-       dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
-                                req->tag, nvmeq->qid);
+       dev_warn(nvmeq->dev->ctrl.device,
+               "I/O %d QID %d timeout, aborting\n",
+                req->tag, nvmeq->qid);
  
        abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
                        BLK_MQ_REQ_NOWAIT);
@@@ -1018,7 -990,7 +1009,7 @@@ static void nvme_cancel_queue_ios(struc
        if (!blk_mq_request_started(req))
                return;
  
-       dev_dbg_ratelimited(nvmeq->q_dmadev,
 -      dev_warn(nvmeq->dev->ctrl.device,
++      dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
                 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
  
        status = NVME_SC_ABORT_REQ;
@@@ -1173,9 -1145,6 +1164,6 @@@ static struct nvme_queue *nvme_alloc_qu
        nvmeq->qid = qid;
        nvmeq->cq_vector = -1;
        dev->queues[qid] = nvmeq;
-       /* make sure queue descriptor is set before queue count, for kthread */
-       mb();
        dev->queue_count++;
  
        return nvmeq;
@@@ -1264,12 -1233,6 +1252,12 @@@ static struct blk_mq_ops nvme_mq_ops = 
  static void nvme_dev_remove_admin(struct nvme_dev *dev)
  {
        if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
 +              /*
 +               * If the controller was reset during removal, it's possible
 +               * user requests may be waiting on a stopped queue. Start the
 +               * queue to flush these to completion.
 +               */
 +              blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
                blk_cleanup_queue(dev->ctrl.admin_q);
                blk_mq_free_tag_set(&dev->admin_tagset);
        }
@@@ -1360,53 -1323,31 +1348,31 @@@ static int nvme_configure_admin_queue(s
        return result;
  }
  
- static int nvme_kthread(void *data)
- {
-       struct nvme_dev *dev, *next;
-       while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               spin_lock(&dev_list_lock);
-               list_for_each_entry_safe(dev, next, &dev_list, node) {
-                       int i;
-                       u32 csts = readl(dev->bar + NVME_REG_CSTS);
-                       /*
-                        * Skip controllers currently under reset.
-                        */
-                       if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
-                               continue;
-                       if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
-                                                       csts & NVME_CSTS_CFS) {
-                               if (queue_work(nvme_workq, &dev->reset_work)) {
-                                       dev_warn(dev->dev,
-                                               "Failed status: %x, reset controller\n",
-                                               readl(dev->bar + NVME_REG_CSTS));
-                               }
-                               continue;
-                       }
-                       for (i = 0; i < dev->queue_count; i++) {
-                               struct nvme_queue *nvmeq = dev->queues[i];
-                               if (!nvmeq)
-                                       continue;
-                               spin_lock_irq(&nvmeq->q_lock);
-                               nvme_process_cq(nvmeq);
-                               while (i == 0 && dev->ctrl.event_limit > 0)
-                                       nvme_submit_async_event(dev);
-                               spin_unlock_irq(&nvmeq->q_lock);
-                       }
+ static void nvme_watchdog_timer(unsigned long data)
+ {
+       struct nvme_dev *dev = (struct nvme_dev *)data;
+       u32 csts = readl(dev->bar + NVME_REG_CSTS);
+       /*
+        * Skip controllers currently under reset.
+        */
+       if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
+           ((csts & NVME_CSTS_CFS) ||
+            (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
+               if (queue_work(nvme_workq, &dev->reset_work)) {
+                       dev_warn(dev->dev,
+                               "Failed status: 0x%x, reset controller.\n",
+                               csts);
                }
-               spin_unlock(&dev_list_lock);
-               schedule_timeout(round_jiffies_relative(HZ));
+               return;
        }
-       return 0;
+       mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
  }
  
  static int nvme_create_io_queues(struct nvme_dev *dev)
  {
-       unsigned i;
+       unsigned i, max;
        int ret = 0;
  
        for (i = dev->queue_count; i <= dev->max_qid; i++) {
                }
        }
  
-       for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
+       max = min(dev->max_qid, dev->queue_count - 1);
+       for (i = dev->online_queues; i <= max; i++) {
                ret = nvme_create_queue(dev->queues[i], i);
                if (ret) {
                        nvme_free_queues(dev, i);
@@@ -1507,7 -1449,8 +1474,8 @@@ static int nvme_setup_io_queues(struct 
         * access to the admin queue, as that might be only way to fix them up.
         */
        if (result > 0) {
-               dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+               dev_err(dev->ctrl.device,
+                       "Could not set queue count (%d)\n", result);
                nr_io_queues = 0;
                result = 0;
        }
                adminq->cq_vector = -1;
                goto free_queues;
        }
-       /* Free previously allocated queues that are no longer usable */
-       nvme_free_queues(dev, nr_io_queues + 1);
        return nvme_create_io_queues(dev);
  
   free_queues:
@@@ -1709,15 -1649,21 +1674,21 @@@ static int nvme_dev_add(struct nvme_de
                if (blk_mq_alloc_tag_set(&dev->tagset))
                        return 0;
                dev->ctrl.tagset = &dev->tagset;
+       } else {
+               blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+               /* Free previously allocated queues that are no longer usable */
+               nvme_free_queues(dev, dev->online_queues);
        }
 -      queue_work(nvme_workq, &dev->scan_work);
 +      nvme_queue_scan(dev);
        return 0;
  }
  
 -static int nvme_dev_map(struct nvme_dev *dev)
 +static int nvme_pci_enable(struct nvme_dev *dev)
  {
        u64 cap;
 -      int bars, result = -ENOMEM;
 +      int result = -ENOMEM;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
  
        if (pci_enable_device_mem(pdev))
  
        dev->entry[0].vector = pdev->irq;
        pci_set_master(pdev);
 -      bars = pci_select_bars(pdev, IORESOURCE_MEM);
 -      if (!bars)
 -              goto disable_pci;
 -
 -      if (pci_request_selected_regions(pdev, bars, "nvme"))
 -              goto disable_pci;
  
        if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
                goto disable;
  
 -      dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
 -      if (!dev->bar)
 -              goto disable;
 -
        if (readl(dev->bar + NVME_REG_CSTS) == -1) {
                result = -ENODEV;
 -              goto unmap;
 +              goto disable;
        }
  
        /*
        if (!pdev->irq) {
                result = pci_enable_msix(pdev, dev->entry, 1);
                if (result < 0)
 -                      goto unmap;
 +                      goto disable;
        }
  
        cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
        pci_save_state(pdev);
        return 0;
  
 - unmap:
 -      iounmap(dev->bar);
 -      dev->bar = NULL;
   disable:
 -      pci_release_regions(pdev);
 - disable_pci:
        pci_disable_device(pdev);
        return result;
  }
  
  static void nvme_dev_unmap(struct nvme_dev *dev)
 +{
 +      if (dev->bar)
 +              iounmap(dev->bar);
 +      pci_release_regions(to_pci_dev(dev->dev));
 +}
 +
 +static void nvme_pci_disable(struct nvme_dev *dev)
  {
        struct pci_dev *pdev = to_pci_dev(dev->dev);
  
        else if (pdev->msix_enabled)
                pci_disable_msix(pdev);
  
 -      if (dev->bar) {
 -              iounmap(dev->bar);
 -              dev->bar = NULL;
 -              pci_release_regions(pdev);
 -      }
 -
        if (pci_is_enabled(pdev)) {
                pci_disable_pcie_error_reporting(pdev);
                pci_disable_device(pdev);
        }
  }
  
- static int nvme_dev_list_add(struct nvme_dev *dev)
- {
-       bool start_thread = false;
-       spin_lock(&dev_list_lock);
-       if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
-               start_thread = true;
-               nvme_thread = NULL;
-       }
-       list_add(&dev->node, &dev_list);
-       spin_unlock(&dev_list_lock);
-       if (start_thread) {
-               nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-               wake_up_all(&nvme_kthread_wait);
-       } else
-               wait_event_killable(nvme_kthread_wait, nvme_thread);
-       if (IS_ERR_OR_NULL(nvme_thread))
-               return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
-       return 0;
- }
- /*
- * Remove the node from the device list and check
- * for whether or not we need to stop the nvme_thread.
- */
- static void nvme_dev_list_remove(struct nvme_dev *dev)
- {
-       struct task_struct *tmp = NULL;
-       spin_lock(&dev_list_lock);
-       list_del_init(&dev->node);
-       if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
-               tmp = nvme_thread;
-               nvme_thread = NULL;
-       }
-       spin_unlock(&dev_list_lock);
-       if (tmp)
-               kthread_stop(tmp);
- }
  static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
  {
        int i;
        u32 csts = -1;
  
-       nvme_dev_list_remove(dev);
+       del_timer_sync(&dev->watchdog_timer);
  
        mutex_lock(&dev->shutdown_lock);
 -      if (dev->bar) {
 +      if (pci_is_enabled(to_pci_dev(dev->dev))) {
                nvme_stop_queues(&dev->ctrl);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
                nvme_disable_io_queues(dev);
                nvme_disable_admin_queue(dev, shutdown);
        }
 -      nvme_dev_unmap(dev);
 +      nvme_pci_disable(dev);
  
        for (i = dev->queue_count - 1; i >= 0; i--)
                nvme_clear_queue(dev->queues[i]);
@@@ -1905,20 -1821,10 +1832,20 @@@ static void nvme_pci_free_ctrl(struct n
        kfree(dev);
  }
  
-       dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
 +static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 +{
++      dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
 +
 +      kref_get(&dev->ctrl.kref);
 +      nvme_dev_disable(dev, false);
 +      if (!schedule_work(&dev->remove_work))
 +              nvme_put_ctrl(&dev->ctrl);
 +}
 +
  static void nvme_reset_work(struct work_struct *work)
  {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
 -      int result;
 +      int result = -ENODEV;
  
        if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
                goto out;
         * If we're called to reset a live controller first shut it down before
         * moving on.
         */
 -      if (dev->bar)
 +      if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
  
        set_bit(NVME_CTRL_RESETTING, &dev->flags);
  
 -      result = nvme_dev_map(dev);
 +      result = nvme_pci_enable(dev);
        if (result)
                goto out;
  
        result = nvme_configure_admin_queue(dev);
        if (result)
 -              goto unmap;
 +              goto out;
  
        nvme_init_queue(dev->queues[0], 0);
        result = nvme_alloc_admin_tags(dev);
        if (result)
 -              goto disable;
 +              goto out;
  
        result = nvme_init_identify(&dev->ctrl);
        if (result)
 -              goto free_tags;
 +              goto out;
  
        result = nvme_setup_io_queues(dev);
        if (result)
 -              goto free_tags;
 +              goto out;
  
        dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+       queue_work(nvme_workq, &dev->async_work);
  
-       result = nvme_dev_list_add(dev);
-       if (result)
-               goto out;
+       mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
  
        /*
         * Keep the controller around but remove all namespaces if we don't have
         * any working I/O queue.
         */
        if (dev->online_queues < 2) {
-               dev_warn(dev->dev, "IO queues not created\n");
+               dev_warn(dev->ctrl.device, "IO queues not created\n");
                nvme_remove_namespaces(&dev->ctrl);
        } else {
                nvme_start_queues(&dev->ctrl);
        clear_bit(NVME_CTRL_RESETTING, &dev->flags);
        return;
  
 - free_tags:
 -      nvme_dev_remove_admin(dev);
 -      blk_put_queue(dev->ctrl.admin_q);
 -      dev->ctrl.admin_q = NULL;
 -      dev->queues[0]->tags = NULL;
 - disable:
 -      nvme_disable_admin_queue(dev, false);
 - unmap:
 -      nvme_dev_unmap(dev);
   out:
 -      nvme_remove_dead_ctrl(dev);
 +      nvme_remove_dead_ctrl(dev, result);
  }
  
  static void nvme_remove_dead_ctrl_work(struct work_struct *work)
        struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
  
 +      nvme_kill_queues(&dev->ctrl);
        if (pci_get_drvdata(pdev))
                pci_stop_and_remove_bus_device_locked(pdev);
        nvme_put_ctrl(&dev->ctrl);
  }
  
 -static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
 -{
 -      dev_warn(dev->ctrl.device, "Removing after probe failure\n");
 -      kref_get(&dev->ctrl.kref);
 -      if (!schedule_work(&dev->remove_work))
 -              nvme_put_ctrl(&dev->ctrl);
 -}
 -
  static int nvme_reset(struct nvme_dev *dev)
  {
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
@@@ -2032,6 -1953,7 +1958,7 @@@ static int nvme_pci_reset_ctrl(struct n
  }
  
  static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+       .module                 = THIS_MODULE,
        .reg_read32             = nvme_pci_reg_read32,
        .reg_write32            = nvme_pci_reg_write32,
        .reg_read64             = nvme_pci_reg_read64,
        .free_ctrl              = nvme_pci_free_ctrl,
  };
  
 +static int nvme_dev_map(struct nvme_dev *dev)
 +{
 +      int bars;
 +      struct pci_dev *pdev = to_pci_dev(dev->dev);
 +
 +      bars = pci_select_bars(pdev, IORESOURCE_MEM);
 +      if (!bars)
 +              return -ENODEV;
 +      if (pci_request_selected_regions(pdev, bars, "nvme"))
 +              return -ENODEV;
 +
 +      dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
 +      if (!dev->bar)
 +              goto release;
 +
 +       return 0;
 +  release:
 +       pci_release_regions(pdev);
 +       return -ENODEV;
 +}
 +
  static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
        int node, result = -ENOMEM;
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
  
-       INIT_LIST_HEAD(&dev->node);
 +      result = nvme_dev_map(dev);
 +      if (result)
 +              goto free;
 +
        INIT_WORK(&dev->scan_work, nvme_dev_scan);
        INIT_WORK(&dev->reset_work, nvme_reset_work);
        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+       INIT_WORK(&dev->async_work, nvme_async_event_work);
+       setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
+               (unsigned long)dev);
        mutex_init(&dev->shutdown_lock);
        init_completion(&dev->ioq_wait);
  
        if (result)
                goto release_pools;
  
+       dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
        queue_work(nvme_workq, &dev->reset_work);
        return 0;
  
        nvme_release_prp_pools(dev);
   put_pci:
        put_device(dev->dev);
 +      nvme_dev_unmap(dev);
   free:
        kfree(dev->queues);
        kfree(dev->entry);
@@@ -2136,27 -2036,23 +2067,30 @@@ static void nvme_shutdown(struct pci_de
        nvme_dev_disable(dev, true);
  }
  
 +/*
 + * The driver's remove may be called on a device in a partially initialized
 + * state. This function must not have any dependencies on the device state in
 + * order to proceed.
 + */
  static void nvme_remove(struct pci_dev *pdev)
  {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
  
+       del_timer_sync(&dev->watchdog_timer);
 +      set_bit(NVME_CTRL_REMOVING, &dev->flags);
        pci_set_drvdata(pdev, NULL);
 -      flush_work(&dev->reset_work);
+       flush_work(&dev->async_work);
        flush_work(&dev->scan_work);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_uninit_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, true);
 +      flush_work(&dev->reset_work);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
        nvme_release_cmb(dev);
        nvme_release_prp_pools(dev);
 +      nvme_dev_unmap(dev);
        nvme_put_ctrl(&dev->ctrl);
  }
  
@@@ -2192,7 -2088,7 +2126,7 @@@ static pci_ers_result_t nvme_error_dete
         * shutdown the controller to quiesce. The controller will be restarted
         * after the slot reset through driver's slot_reset callback.
         */
-       dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+       dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
@@@ -2209,7 -2105,7 +2143,7 @@@ static pci_ers_result_t nvme_slot_reset
  {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
  
-       dev_info(&pdev->dev, "restart after slot reset\n");
+       dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
        queue_work(nvme_workq, &dev->reset_work);
        return PCI_ERS_RESULT_RECOVERED;
@@@ -2232,7 -2128,8 +2166,8 @@@ static const struct pci_error_handlers 
  
  static const struct pci_device_id nvme_id_table[] = {
        { PCI_VDEVICE(INTEL, 0x0953),
-               .driver_data = NVME_QUIRK_STRIPE_SIZE, },
+               .driver_data = NVME_QUIRK_STRIPE_SIZE |
+                               NVME_QUIRK_DISCARD_ZEROES, },
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
@@@ -2257,34 -2154,20 +2192,20 @@@ static int __init nvme_init(void
  {
        int result;
  
-       init_waitqueue_head(&nvme_kthread_wait);
        nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!nvme_workq)
                return -ENOMEM;
  
-       result = nvme_core_init();
-       if (result < 0)
-               goto kill_workq;
        result = pci_register_driver(&nvme_driver);
        if (result)
-               goto core_exit;
-       return 0;
-  core_exit:
-       nvme_core_exit();
-  kill_workq:
-       destroy_workqueue(nvme_workq);
+               destroy_workqueue(nvme_workq);
        return result;
  }
  
  static void __exit nvme_exit(void)
  {
        pci_unregister_driver(&nvme_driver);
-       nvme_core_exit();
        destroy_workqueue(nvme_workq);
-       BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
        _nvme_check_size();
  }
  
diff --combined include/linux/lightnvm.h
index 2190419bdf0a04e549b64946037d8d61fd025781,8f8a74328f20b387d1b5e3fe440b7256c233bd9b..c3c43184a787027abd104bc0e46fbbbeb071b212
@@@ -92,9 -92,9 +92,9 @@@ enum 
        NVM_ADDRMODE_CHANNEL    = 1,
  
        /* Plane programming mode for LUN */
-       NVM_PLANE_SINGLE        = 0,
-       NVM_PLANE_DOUBLE        = 1,
-       NVM_PLANE_QUAD          = 2,
+       NVM_PLANE_SINGLE        = 1,
+       NVM_PLANE_DOUBLE        = 2,
+       NVM_PLANE_QUAD          = 4,
  
        /* Status codes */
        NVM_RSP_SUCCESS         = 0x0,
        /* Memory types */
        NVM_ID_FMTYPE_SLC       = 0,
        NVM_ID_FMTYPE_MLC       = 1,
 +
 +      /* Device capabilities */
 +      NVM_ID_DCAP_BBLKMGMT    = 0x1,
 +      NVM_UD_DCAP_ECC         = 0x2,
  };
  
  struct nvm_id_lp_mlc {
@@@ -341,8 -337,8 +341,8 @@@ struct nvm_dev 
        int lps_per_blk;
        int *lptbl;
  
-       unsigned long total_pages;
        unsigned long total_blocks;
+       unsigned long total_secs;
        int nr_luns;
        unsigned max_pages_per_blk;