Merge tag 'v3.5-rc7' into regulator-drivers
author Mark Brown <broonie@opensource.wolfsonmicro.com>
Sun, 15 Jul 2012 20:49:21 +0000 (21:49 +0100)
committer Mark Brown <broonie@opensource.wolfsonmicro.com>
Sun, 15 Jul 2012 20:49:21 +0000 (21:49 +0100)
Linux 3.5-rc7

351 files changed:
Documentation/ABI/testing/sysfs-block-rssd
Documentation/ABI/testing/sysfs-class-mtd
Documentation/DocBook/media/v4l/controls.xml
Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
Documentation/device-mapper/verity.txt
Documentation/devicetree/bindings/input/fsl-mma8450.txt
Documentation/devicetree/bindings/mfd/mc13xxx.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/prctl/no_new_privs.txt [new file with mode: 0644]
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/atomic.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/thread_info.h
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/signal.c
arch/arm/kernel/signal.h
arch/arm/kernel/traps.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-dove/include/mach/bridge-regs.h
arch/arm/mach-dove/include/mach/dove.h
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/mach-imx27_visstrim_m10.c
arch/arm/mach-mmp/include/mach/gpio-pxa.h [deleted file]
arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
arch/arm/mach-mxs/mach-apx4devkit.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/clockdomain.h
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-s3c24xx/clock-s3c2440.c
arch/arm/mach-shmobile/platsmp.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/timer.c
arch/arm/mach-versatile/pci.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/s5p-clock.c
arch/h8300/include/asm/pgtable.h
arch/h8300/include/asm/uaccess.h
arch/h8300/kernel/signal.c
arch/h8300/kernel/time.c
arch/mips/pci/pci-lantiq.c
arch/mn10300/include/asm/ptrace.h
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/asm/timex.h
arch/mn10300/kernel/cevt-mn10300.c
arch/mn10300/kernel/internal.h
arch/mn10300/kernel/irq.c
arch/mn10300/kernel/traps.c
arch/mn10300/mm/dma-alloc.c
arch/mn10300/unit-asb2303/include/unit/timex.h
arch/mn10300/unit-asb2303/smc91111.c
arch/mn10300/unit-asb2305/include/unit/timex.h
arch/mn10300/unit-asb2305/unit-init.c
arch/mn10300/unit-asb2364/include/unit/timex.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/kernel/irq.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/pseries/processor_idle.c
arch/powerpc/xmon/xmon.c
arch/sh/include/asm/io_noioport.h
arch/sh/kernel/cpu/sh3/serial-sh7720.c
arch/tile/kernel/backtrace.c
arch/um/drivers/mconsole_kern.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kvm/mmu.c
arch/xtensa/kernel/process.c
block/blk-cgroup.c
block/blk-core.c
block/blk-timeout.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/processor_core.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_req.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/umem.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/clk/clk.c
drivers/gpio/Kconfig
drivers/gpio/devres.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-sta2x11.c
drivers/gpio/gpio-tps65910.c
drivers/gpio/gpio-wm8994.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/si.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hwmon/it87.c
drivers/hwspinlock/hwspinlock_core.c
drivers/input/joystick/as5011.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/mcs_touchkey.c
drivers/input/keyboard/mpr121_touchkey.c
drivers/input/keyboard/qt1070.c
drivers/input/keyboard/tca6416-keypad.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/keyboard/tnetv107x-keypad.c
drivers/input/misc/ad714x.c
drivers/input/misc/dm355evm_keys.c
drivers/input/mouse/bcm5974.c
drivers/input/tablet/wacom_sys.c
drivers/input/touchscreen/ad7879.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/bu21013_ts.c
drivers/input/touchscreen/cy8ctmg110_ts.c
drivers/input/touchscreen/intel-mid-touch.c
drivers/input/touchscreen/pixcir_i2c_ts.c
drivers/input/touchscreen/tnetv107x-ts.c
drivers/input/touchscreen/tsc2005.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/tegra-smmu.c
drivers/leds/ledtrig-heartbeat.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/persistent-data/dm-space-map-checker.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb/dvb-core/dvbdev.c
drivers/media/rc/winbond-cir.c
drivers/media/video/cx231xx/cx231xx-audio.c
drivers/media/video/cx231xx/cx231xx-vbi.c
drivers/media/video/cx23885/cx23885-cards.c
drivers/media/video/cx23885/cx23885-dvb.c
drivers/media/video/cx23885/cx23885-video.c
drivers/media/video/cx23885/cx23885.h
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/gspca/sn9c20x.c
drivers/media/video/mx2_camera.c
drivers/media/video/omap3isp/isppreview.c
drivers/media/video/pms.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-lite.c
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-mdevice.h
drivers/media/video/s5p-mfc/s5p_mfc_dec.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/smiapp/smiapp-core.c
drivers/mfd/Kconfig
drivers/mfd/ab5500-core.h [deleted file]
drivers/mfd/mc13xxx-spi.c
drivers/mfd/omap-usb-host.c
drivers/mfd/palmas.c
drivers/misc/mei/main.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/mmc/core/cd-gpio.c
drivers/mmc/core/mmc.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nandsim.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/key.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/ti/wlcore/Kconfig
drivers/of/platform.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/quirks.c
drivers/regulator/core.c
drivers/remoteproc/Kconfig
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/rtc-ab8500.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-twl.c
drivers/scsi/aic94xx/aic94xx_task.c
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/qla2xxx/qla_target.c
drivers/target/tcm_fc/tfc_sess.c
drivers/tty/hvc/hvc_opal.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/hub.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/serial/metro-usb.c
drivers/usb/serial/option.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/venc.c
drivers/virtio/virtio_balloon.c
fs/btrfs/backref.c
fs/btrfs/ctree.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.h
fs/btrfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/cifs/connect.c
fs/ecryptfs/kthread.c
fs/ecryptfs/miscdev.c
fs/fat/inode.c
fs/locks.c
fs/nfs/direct.c
fs/nfs/super.c
fs/ocfs2/dlmglue.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/quota_global.c
fs/open.c
fs/ramfs/file-nommu.c
fs/splice.c
include/linux/aio.h
include/linux/blkdev.h
include/linux/bootmem.h
include/linux/gpio.h
include/linux/hrtimer.h
include/linux/input.h
include/linux/kvm_host.h
include/linux/memblock.h
include/linux/mmzone.h
include/linux/pci.h
include/linux/prctl.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rpmsg.h
include/linux/sched.h
include/linux/splice.h
include/net/sctp/structs.h
include/net/sctp/tsnmap.h
include/scsi/libsas.h
include/scsi/scsi_cmnd.h
kernel/cgroup.c
kernel/fork.c
kernel/hrtimer.c
kernel/printk.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/relay.c
kernel/sched/core.c
kernel/sched/idle_task.c
kernel/sched/sched.h
kernel/sys.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
lib/dma-debug.c
mm/bootmem.c
mm/compaction.c
mm/madvise.c
mm/memblock.c
mm/memory_hotplug.c
mm/nobootmem.c
mm/shmem.c
mm/sparse.c
mm/vmscan.c
net/core/dev.c
net/core/skbuff.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nfnetlink.c
net/nfc/nci/ntf.c
net/nfc/rawsock.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
security/security.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320aic3x.h
sound/soc/codecs/wm2200.c
sound/usb/endpoint.c
sound/usb/pcm.c
tools/perf/util/map.c
tools/perf/util/session.c
tools/perf/util/trace-event-parse.c
virt/kvm/assigned-dev.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

index 679ce354312281846e4ef2decee0689b0775c985..beef30c046b0d3181bfc353d15704bb4bdaf3da6 100644 (file)
@@ -1,26 +1,5 @@
-What:           /sys/block/rssd*/registers
-Date:           March 2012
-KernelVersion:  3.3
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps below driver information and
-                hardware registers.
-                    - S ACTive
-                    - Command Issue
-                    - Completed
-                    - PORT IRQ STAT
-                    - HOST IRQ STAT
-                    - Allocated
-                    - Commands in Q
-
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
 Description:    This is a read-only file. Indicates the status of the device.
-
-What:           /sys/block/rssd*/flags
-Date:           May 2012
-KernelVersion:  3.5
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps the flags in port and driver
-                data structure
index db1ad7e34fc3a00e14be0c1045564fe6c98bd37e..938ef71e2035e7c04203c7b65d2a120dbd96944a 100644 (file)
@@ -142,13 +142,14 @@ KernelVersion:    3.4
 Contact:       linux-mtd@lists.infradead.org
 Description:
                This allows the user to examine and adjust the criteria by which
-               mtd returns -EUCLEAN from mtd_read().  If the maximum number of
-               bit errors that were corrected on any single region comprising
-               an ecc step (as reported by the driver) equals or exceeds this
-               value, -EUCLEAN is returned.  Otherwise, absent an error, 0 is
-               returned.  Higher layers (e.g., UBI) use this return code as an
-               indication that an erase block may be degrading and should be
-               scrutinized as a candidate for being marked as bad.
+               mtd returns -EUCLEAN from mtd_read() and mtd_read_oob().  If the
+               maximum number of bit errors that were corrected on any single
+               region comprising an ecc step (as reported by the driver) equals
+               or exceeds this value, -EUCLEAN is returned.  Otherwise, absent
+               an error, 0 is returned.  Higher layers (e.g., UBI) use this
+               return code as an indication that an erase block may be
+               degrading and should be scrutinized as a candidate for being
+               marked as bad.
 
                The initial value may be specified by the flash device driver.
                If not, then the default value is ecc_strength.
@@ -167,7 +168,7 @@ Description:
                block degradation, but high enough to avoid the consequences of
                a persistent return value of -EUCLEAN on devices where sticky
                bitflips occur.  Note that if bitflip_threshold exceeds
-               ecc_strength, -EUCLEAN is never returned by mtd_read().
+               ecc_strength, -EUCLEAN is never returned by the read operations.
                Conversely, if bitflip_threshold is zero, -EUCLEAN is always
                returned, absent a hard error.
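
As an illustration of how this tunable might be adjusted from user space, here is a minimal C sketch; the mtd0 device node and the threshold value of 3 are assumptions made for the example, not values taken from the patch.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* Assumed path: the per-device sysfs attribute for mtd0. */
            FILE *f = fopen("/sys/class/mtd/mtd0/bitflip_threshold", "w");

            if (!f) {
                    perror("bitflip_threshold");
                    return EXIT_FAILURE;
            }
            /* Example value: report -EUCLEAN once 3 or more bitflips are
             * corrected within a single ecc step. */
            fprintf(f, "3\n");
            fclose(f);
            return EXIT_SUCCESS;
    }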
 
index 676bc46f9c52a476b3c8c280cca437d756726235..cda0dfb6769aee5d0fcadb8a30b4d19658259bc7 100644 (file)
@@ -3988,7 +3988,7 @@ interface and may change in the future.</para>
            from RGB to Y'CbCr color space.
            </entry>
          </row>
-         <row id = "v4l2-jpeg-chroma-subsampling">
+         <row>
            <entrytbl spanname="descr" cols="2">
              <tbody valign="top">
                <row>
index e3d5afcdafbb5b03cf9994478e3e0d8139442dda..0a4b90fcf2dab77b36ed1e52749040ddcc97a113 100644 (file)
@@ -284,13 +284,6 @@ These controls are described in <xref
            processing controls. These controls are described in <xref
            linkend="image-process-controls" />.</entry>
          </row>
-         <row>
-           <entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
-           <entry>0x9d0000</entry>
-           <entry>The class containing JPEG compression controls.
-These controls are described in <xref
-               linkend="jpeg-controls" />.</entry>
-         </row>
        </tbody>
       </tgroup>
     </table>
index 32e48797a14f80de6ebff7a441e489021bfce58a..9884681535ee36bf03a7d7baaa54abb36360d53d 100644 (file)
@@ -7,39 +7,39 @@ This target is read-only.
 
 Construction Parameters
 =======================
-    <version> <dev> <hash_dev> <hash_start>
+    <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
     <algorithm> <digest> <salt>
 
 <version>
-    This is the version number of the on-disk format.
+    This is the type of the on-disk hash format.
 
     0 is the original format used in the Chromium OS.
-       The salt is appended when hashing, digests are stored continuously and
-       the rest of the block is padded with zeros.
+      The salt is appended when hashing, digests are stored continuously and
+      the rest of the block is padded with zeros.
 
     1 is the current format that should be used for new devices.
-       The salt is prepended when hashing and each digest is
-       padded with zeros to the power of two.
+      The salt is prepended when hashing and each digest is
+      padded with zeros to the power of two.
 
 <dev>
-    This is the device containing the data the integrity of which needs to be
+    This is the device containing data, the integrity of which needs to be
     checked.  It may be specified as a path, like /dev/sdaX, or a device number,
     <major>:<minor>.
 
 <hash_dev>
-    This is the device that that supplies the hash tree data.  It may be
+    This is the device that supplies the hash tree data.  It may be
     specified similarly to the device path and may be the same device.  If the
-    same device is used, the hash_start should be outside of the dm-verity
-    configured device size.
+    same device is used, the hash_start should be outside the configured
+    dm-verity device.
 
 <data_block_size>
-    The block size on a data device.  Each block corresponds to one digest on
-    the hash device.
+    The block size on a data device in bytes.
+    Each block corresponds to one digest on the hash device.
 
 <hash_block_size>
-    The size of a hash block.
+    The size of a hash block in bytes.
 
 <num_data_blocks>
     The number of data blocks on the data device.  Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
 Theory of operation
 ===================
 
-dm-verity is meant to be setup as part of a verified boot path.  This
+dm-verity is meant to be set up as part of a verified boot path.  This
 may be anything ranging from a boot using tboot or trustedgrub to just
 booting from a known-good device (like a USB drive or CD).
 
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
 has been authenticated in some way (cryptographic signatures, etc).
 After instantiation, all hashes will be verified on-demand during
 disk access.  If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail.  This should identify
+tree, the root hash, then the I/O will fail.  This should detect
 tampering with any data on the device and the hash data.
 
 Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis.  This allows for a lightweight hash computation on first read
-into the page cache.  Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
 
 Hash Tree
 ---------
 
 Each node in the tree is a cryptographic hash.  If it is a leaf node, the hash
-is of some block data on disk.  If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
 
 Each entry in the tree is a collection of neighboring nodes that fit in one
 block.  The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
 On-disk format
 ==============
 
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
 
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
-       uint8_t signature[8]
-               "verity\0\0";
-
-       uint8_t version;
-               1 - current format
-
-       uint8_t data_block_bits;
-               log2(data block size)
-
-       uint8_t hash_block_bits;
-               log2(hash block size)
-
-       uint8_t pad1[1];
-               zero padding
-
-       uint16_t salt_size;
-               big-endian salt size
-
-       uint8_t pad2[2];
-               zero padding
-
-       uint32_t data_blocks_hi;
-               big-endian high 32 bits of the 64-bit number of data blocks
-
-       uint32_t data_blocks_lo;
-               big-endian low 32 bits of the 64-bit number of data blocks
-
-       uint8_t algorithm[16];
-               cryptographic algorithm
-
-       uint8_t salt[384];
-               salt (the salt size is specified above)
-
-       uint8_t pad3[88];
-               zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
 
 Directly following the header (and with sector number padded to the next hash
 block boundary) are the hash blocks which are stored a depth at a time
 (starting from the root), sorted in order of increasing index.
 
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+  http://code.google.com/p/cryptsetup/wiki/DMVerity
+
 Status
 ======
 V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
 
 Example
 =======
-
-Setup a device:
-  dmsetup create vroot --table \
-    "0 2097152 "\
-    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+  # dmsetup create vroot --readonly --table \
+    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
     "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
     "1234000000000000000000000000000000000000000000000000000000000000"
 
 A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver.  This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
-    git://sources.redhat.com/git/lvm2
-    http://sourceware.org/git/?p=lvm2.git
-    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
-       4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+  # veritysetup format /dev/sda1 /dev/sda2
+  ...
+  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+  # veritysetup create vroot /dev/sda1 /dev/sda2 \
+    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
index a00c94ccbdeeb871c440b770de814d11c6f757d7..0b96e5737d3a569ca4acfec137cc6870d1b56880 100644 (file)
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible : "fsl,mma8450".
+- reg: the I2C address of MMA8450
 
 Example:
 
index 19f6af47a792986c23a9ed033e15e8639ec5c51b..baf07987ae6863c68b39efa5ba1227a327009337 100644 (file)
@@ -46,8 +46,8 @@ Examples:
 
 ecspi@70010000 { /* ECSPI1 */
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
+                  <&gpio4 25 0>; /* GPIO4_25 */
        status = "okay";
 
        pmic: mc13892@0 {
index c7e404b3ef0515b5527583cae877ab48c5d69595..fea541ee8b34a18fbf1aaef9276c17ef992db830 100644 (file)
@@ -29,6 +29,6 @@ esdhc@70008000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70008000 0x4000>;
        interrupts = <2>;
-       cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
-       wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
+       cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
+       wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
 };
index 7ab9e1a2d8bec19fac2283b5703fae60d2858998..4616fc28ee86e83793550a0ceda9fa25e4b46167 100644 (file)
@@ -19,6 +19,6 @@ ethernet@83fec000 {
        reg = <0x83fec000 0x4000>;
        interrupts = <87>;
        phy-mode = "mii";
-       phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
+       phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
        local-mac-address = [00 04 9F 01 1B B9];
 };
index 9841057d112bb6e4c46299166ef7c5a9a6da5521..4256a6df9b79355d8c17f5f393c3956d6a5a78ca 100644 (file)
@@ -17,6 +17,6 @@ ecspi@70010000 {
        reg = <0x70010000 0x4000>;
        interrupts = <36>;
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
+                  <&gpio3 25 0>; /* GPIO3_25 */
 };
index 6eab91747a86a03c45c60d9bdca74fb28215b3a3..db4d3af3643c407ffad6e4ead2dd81d8c4ab36cf 100644 (file)
@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry.  Keep list in alphabetical order.
 This isn't an exhaustive list, but you should add new prefixes to it before
 using them to avoid name-space collisions.
 
+ad     Avionic Design GmbH
 adi    Analog Devices, Inc.
 amcc   Applied Micro Circuits Corporation (APM, formally AMCC)
 apm    Applied Micro Circuits Corporation (APM)
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
new file mode 100644 (file)
index 0000000..f7be84f
--- /dev/null
@@ -0,0 +1,57 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   /etc/passwd to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set no_new_privs.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).
+
+Be careful, though: LSMs might also not tighten constraints on exec
+in no_new_privs mode.  (This means that setting up a general-purpose
+service launcher to set no_new_privs before execing daemons may
+interfere with LSM-based sandboxing.)
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve.  An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has no_new_privs set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set.  In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerably less
+dangerous than chroot by itself.
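
A minimal C sketch of the prctl usage described above; it assumes a 3.5 or later kernel and defines the PR_* constants locally in case the installed libc headers predate them.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_NO_NEW_PRIVS
    #define PR_SET_NO_NEW_PRIVS 38    /* value from include/linux/prctl.h */
    #endif
    #ifndef PR_GET_NO_NEW_PRIVS
    #define PR_GET_NO_NEW_PRIVS 39
    #endif

    int main(void)
    {
            /* Once set, the bit is inherited across fork, clone and execve
             * and cannot be cleared again. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
                    perror("prctl(PR_SET_NO_NEW_PRIVS)");
                    return 1;
            }

            printf("no_new_privs = %d\n",
                   prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));

            /* Any setuid/setgid or file-capability binary executed from
             * here on no longer gains privileges. */
            execlp("id", "id", "-u", (char *)NULL);
            perror("execlp");
            return 1;
    }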
index 930126698a0f5b0f6e2c458b53604d581b201063..2c9948379469c1dba5f8c7ef711567067727ff08 100644 (file)
@@ -1930,6 +1930,23 @@ The "pte_enc" field provides a value that can OR'ed into the hash
 PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
 into the hash PTE second double word).
 
+4.75 KVM_IRQFD
+
+Capability: KVM_CAP_IRQFD
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_irqfd (in)
+Returns: 0 on success, -1 on error
+
+Allows setting an eventfd to directly trigger a guest interrupt.
+kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
+kvm_irqfd.gsi specifies the irqchip pin toggled by this event.  When
+an event is triggered on the eventfd, an interrupt is injected into
+the guest using the specified gsi pin.  The irqfd is removed using
+the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
+and kvm_irqfd.gsi.
+
+
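
As a rough user-space sketch of the ioctl described above, assuming a VM file descriptor already obtained via KVM_CREATE_VM on /dev/kvm and an in-kernel irqchip; the GSI number 5 is an arbitrary placeholder.

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/eventfd.h>
    #include <linux/kvm.h>

    /* Wire an eventfd to guest GSI 5 on an existing VM fd and return the
     * eventfd, or -1 on error.  Illustrative only. */
    int attach_irqfd(int vm_fd)
    {
            struct kvm_irqfd irqfd;
            int efd = eventfd(0, 0);

            if (efd < 0) {
                    perror("eventfd");
                    return -1;
            }

            memset(&irqfd, 0, sizeof(irqfd));
            irqfd.fd  = efd;    /* eventfd that triggers the injection */
            irqfd.gsi = 5;      /* irqchip pin to toggle (placeholder) */

            if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
                    perror("KVM_IRQFD");
                    return -1;
            }

            /* Writing a value to efd now injects the interrupt; passing the
             * same fd/gsi with KVM_IRQFD_FLAG_DEASSIGN in irqfd.flags
             * removes the binding. */
            return efd;
    }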
 5. The kvm_run structure
 ------------------------
 
index eb22272b2116c70cb6a6af438c24ab9f2edad9fb..1b71f6ceae0a11948f4d0896754944f51c119fb9 100644 (file)
@@ -4654,8 +4654,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T:     git git://1984.lsi.us.es/nf
+T:     git git://1984.lsi.us.es/nf-next
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
@@ -4857,6 +4857,7 @@ M:        Kevin Hilman <khilman@ti.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/*omap*/*pm*
+F:     drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
 M:     Rajendra Nayak <rnayak@ti.com>
@@ -5909,7 +5910,7 @@ M:        Ingo Molnar <mingo@redhat.com>
 M:     Peter Zijlstra <peterz@infradead.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
-F:     kernel/sched*
+F:     kernel/sched/
 F:     include/linux/sched.h
 
 SCORE ARCHITECTURE
index 81ea15450049e114f74bb0c753955b5b053addb4..aa8e315f26ef9b3aaef3509cf82489280bba382e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index 9854ff4279e0891bfaaa59d8086df7ee247c5003..11828e632532accb5ab386ac9a0ffe173c6b9fdf 100644 (file)
@@ -176,7 +176,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_LIBUSUAL=y
index 68374ba6a943e9d029246d927b64a52913ea525e..c79f61faa3a55e81ae773b6d9d83532821b24c33 100644 (file)
@@ -243,7 +243,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
 {
        u64 result;
 
index 3d2220498abc2db2d98378c94a3309c71e91ccee..6ddbe446425e11524d927b5cd8b479bcd2419238 100644 (file)
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x)                                  \
-       do {                                            \
-       __asm__ __volatile__(                           \
-       "mcr    p15, 0, %0, c3, c0      @ set domain"   \
-         : : "r" (x));                                 \
-       isb();                                          \
-       } while (0)
+static inline void set_domain(unsigned val)
+{
+       asm volatile(
+       "mcr    p15, 0, %0, c3, c0      @ set domain"
+         : : "r" (val));
+       isb();
+}
 
 #define modify_domain(dom,type)                                        \
        do {                                                    \
@@ -78,8 +78,8 @@
        } while (0)
 
 #else
-#define set_domain(x)          do { } while (0)
-#define modify_domain(dom,type)        do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type)  { }
 #endif
 
 /*
index b79f8e97f7755f22d82ae20ee00442cd11f7af02..af7b0bda3355d9af850ae722b2b1f55277ef6e67 100644 (file)
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
 #define TIF_SYSCALL_AUDIT      9
-#define TIF_SYSCALL_RESTARTSYS 10
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS        (1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                          _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
 
 /*
  * Change these and you break ASM code in entry-common.S
index ba32b393b3f0c514c83799687348d52655bfe1da..38c1a3b103a0684b5b579bb74ca07f38bb91eb13 100644 (file)
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
        TEST_BF_R ("mov pc, r",0,2f,"")
        TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
        TEST_BB(   "sub pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
-       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
        TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
        TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
index 186c8cb982c543a2cc1631796b34376151b33702..a02eada3aa5d06036027ac1241e652d5032781bd 100644 (file)
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
-               return -EPERM;
+               return -EOPNOTSUPP;
        }
 
        /*
index 5700a7ae7f0bc1511ae048d7a3f025c401e5729c..14e38261cd31db9d852db2eb0b8046251a04613d 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/regset.h>
 #include <linux/audit.h>
 #include <linux/tracehook.h>
-#include <linux/unistd.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
                audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
                                    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
-       if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
-               scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                return scno;
 
index fd2392a17ac1befb3464b5a0e77960a0f862b4ca..536c5d6b340b7fca2aee9c770000372c38be98bb 100644 (file)
@@ -27,6 +27,7 @@
  */
 #define SWI_SYS_SIGRETURN      (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 #define SWI_SYS_RT_SIGRETURN   (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART                (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
 
 /*
  * With EABI, the syscall number has to be loaded into r7.
@@ -46,6 +47,18 @@ const unsigned long sigreturn_codes[7] = {
        MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
+/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled.  In the latter case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+       SWI_SYS_RESTART,        /* swi  __NR_restart_syscall */
+       0xe49df004,             /* ldr  pc, [sp], #4 */
+};
+
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
-               case -ERESTART_RESTARTBLOCK:
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
+               case -ERESTART_RESTARTBLOCK:
+                       regs->ARM_r0 = -EINTR;
+                       break;
                }
        }
 
@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->ARM_pc == restart_addr) {
-                       if (retval == -ERESTARTNOHAND ||
-                           retval == -ERESTART_RESTARTBLOCK
+                       if (retval == -ERESTARTNOHAND
                            || (retval == -ERESTARTSYS
                                && !(ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
-                       clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
                }
 
                handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * ignore the restart.
                 */
                if (retval == -ERESTART_RESTARTBLOCK
-                   && regs->ARM_pc == restart_addr)
-                       set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+                   && regs->ARM_pc == continue_addr) {
+                       if (thumb_mode(regs)) {
+                               regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+                               regs->ARM_pc -= 2;
+                       } else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+                               regs->ARM_r7 = __NR_restart_syscall;
+                               regs->ARM_pc -= 4;
+#else
+                               u32 __user *usp;
+
+                               regs->ARM_sp -= 4;
+                               usp = (u32 __user *)regs->ARM_sp;
+
+                               if (put_user(regs->ARM_pc, usp) == 0) {
+                                       regs->ARM_pc = KERN_RESTART_CODE;
+                               } else {
+                                       regs->ARM_sp += 4;
+                                       force_sigsegv(0, current);
+                               }
+#endif
+                       }
+               }
        }
 
        restore_saved_sigmask();
index 5ff067b7c7522f428b4343832ecb759b7ccf16de..6fcfe8398aa473051fbf72396986a84dc700978f 100644 (file)
@@ -8,5 +8,7 @@
  * published by the Free Software Foundation.
  */
 #define KERN_SIGRETURN_CODE    (CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE      (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
 
 extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
index 4928d89758f4ce0dea767acdf9fcf2299dff7833..3647170e9a16ba3aa8838218ed99e74dbc5b7c59 100644 (file)
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
         */
        memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
               sigreturn_codes, sizeof(sigreturn_codes));
+       memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+              syscall_restart_code, sizeof(syscall_restart_code));
 
        flush_icache_range(vectors, vectors + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
index 43a31fb06318dc0a1213827a5069b73cac19f5a6..36ff15bbfdd4961afecfef9ea292c6445e9b3836 100644 (file)
@@ -183,7 +183,9 @@ SECTIONS
        }
 #endif
 
+#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
index 226949dc4ac04a242c40e215d23818421342b99c..f953bb54aa9d31d590791d97aaaf770aaaecfd93 100644 (file)
@@ -50,5 +50,6 @@
 #define POWER_MANAGEMENT       (BRIDGE_VIRT_BASE | 0x011c)
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE         (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index ad1165d488c13f393bf352a00f0e03b62265f3cc..d52b0ef313b7e53c2efa6de67d0ddeddc4790ca5 100644 (file)
@@ -78,6 +78,7 @@
 
 /* North-South Bridge */
 #define BRIDGE_VIRT_BASE       (DOVE_SB_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE       (DOVE_SB_REGS_PHYS_BASE | 0x20000)
 
 /* Cryptographic Engine */
 #define DOVE_CRYPT_PHYS_BASE   (DOVE_SB_REGS_PHYS_BASE | 0x30000)
index e9fafcf163de8982287876a7ca6d6cf94488f472..373c3c00d24cdbbe054a1aae60625d97188e6208 100644 (file)
@@ -119,7 +119,9 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
                                                struct exynos_pm_domain *pd)
 {
        if (pdev->dev.bus) {
-               if (pm_genpd_add_device(&pd->pd, &pdev->dev))
+               if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
+                       pm_genpd_dev_need_restore(&pdev->dev, true);
+               else
                        pr_info("%s: error in adding %s device to %s power"
                                "domain\n", __func__, dev_name(&pdev->dev),
                                pd->name);
@@ -151,9 +153,12 @@ static __init int exynos4_pm_init_power_domain(void)
        if (of_have_populated_dt())
                return exynos_pm_dt_parse_domains();
 
-       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
-               pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
-                               exynos4_pm_domains[idx]->is_off);
+       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
+               struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
+               int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+               pm_genpd_init(&pd->pd, NULL, !on);
+       }
 
 #ifdef CONFIG_S5P_DEV_FIMD0
        exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
index 920a8cc4272609829f941432cf9c10b26b979aba..c6422fb10bae37756693f3323f79df62e4fc932e 100644 (file)
@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
                        pr_err("i.MX35 clk %d: register failed with %ld\n",
                                i, PTR_ERR(clk[i]));
 
-
        clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
        clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
        clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,6 +263,14 @@ int __init mx35_clocks_init()
        clk_prepare_enable(clk[iim_gate]);
        clk_prepare_enable(clk[emi_gate]);
 
+       /*
+        * SCC is needed to boot via mmc after a watchdog reset. The clock code
+        * before conversion to common clk also enabled UART1 (which isn't
+        * handled here and not needed for mmc) and IIM (which is enabled
+        * unconditionally above).
+        */
+       clk_prepare_enable(clk[scc_gate]);
+
        imx_print_silicon_rev("i.MX35", mx35_revision());
 
 #ifdef CONFIG_MXC_USE_EPIT
index f76edb96a48a671322f7a6c30935cfa524477463..ba09552fe5feee5b6720e3b294449e6d46bfeddf 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
 #include <mach/common.h>
 #include <mach/iomux-mx27.h>
 
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
deleted file mode 100644 (file)
index 0e135a5..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_GPIO_PXA_H
-#define __ASM_MACH_GPIO_PXA_H
-
-#include <mach/addr-map.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-
-#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
-
-#define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x)    (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-#define gpio_to_bank(gpio)     ((gpio) >> 5)
-
-/* NOTE: these macros are defined here to make optimization of
- * gpio_{get,set}_value() to work when 'gpio' is a constant.
- * Usage of these macros otherwise is no longer recommended,
- * use generic GPIO API whenever possible.
- */
-#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
-
-#define GPLR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
-#define GPDR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
-#define GPSR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
-#define GPCR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
-
-#include <plat/gpio-pxa.h>
-
-#endif /* __ASM_MACH_GPIO_PXA_H */
index c64dbb96dbad53a4264b8559ec531f229b75d08b..eb187e0e059bdbb1459b2ae21d69b6050176cba2 100644 (file)
@@ -31,5 +31,6 @@
 #define IRQ_MASK_HIGH_OFF      0x0014
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE                (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index 3674497162e3efa3b2238360fda132e9761a4d21..e807c4c52a0b6331a4e02146f71edc127d95cb7f 100644 (file)
@@ -42,6 +42,7 @@
 #define MV78XX0_CORE0_REGS_PHYS_BASE   0xf1020000
 #define MV78XX0_CORE1_REGS_PHYS_BASE   0xf1024000
 #define MV78XX0_CORE_REGS_VIRT_BASE    0xfe400000
+#define MV78XX0_CORE_REGS_PHYS_BASE    0xfe400000
 #define MV78XX0_CORE_REGS_SIZE         SZ_16K
 
 #define MV78XX0_PCIE_IO_PHYS_BASE(i)   (0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
  * Core-specific peripheral registers.
  */
 #define BRIDGE_VIRT_BASE       (MV78XX0_CORE_REGS_VIRT_BASE)
+#define BRIDGE_PHYS_BASE       (MV78XX0_CORE_REGS_PHYS_BASE)
 
 /*
  * Register Map
index 5e90b9dcdef8789e10d71d2a8500604d53e57eeb..f5f061757deb54371e599730d8f38fc0b15c100e 100644 (file)
@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
        return 0;
 }
 
+static void __init apx4devkit_fec_phy_clk_enable(void)
+{
+       struct clk *clk;
+
+       /* Enable fec phy clock */
+       clk = clk_get_sys("enet_out", NULL);
+       if (!IS_ERR(clk))
+               clk_prepare_enable(clk);
+}
+
 static void __init apx4devkit_init(void)
 {
        mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
        phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
                        apx4devkit_phy_fixup);
 
+       apx4devkit_fec_phy_clk_enable();
        mx28_add_fec(0, &mx28_fec_pdata);
 
        mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
index 8fa2fc3a4c3c51e973eb07c91f3c687c86aca62a..779734d8ba37304417350cd246c52044a0238897 100644 (file)
@@ -494,8 +494,8 @@ static void __init overo_init(void)
 
        regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-       omap_hsmmc_init(mmc);
        overo_i2c_init();
+       omap_hsmmc_init(mmc);
        omap_display_init(&overo_dss_data);
        omap_serial_init();
        omap_sdrc_init(mt46h32m32lf6_sdrc_params,
index f7b58609bad888b6b276a524badf9518e450a709..6227e9505c2db6858c44bf2b9f30e0abe7ec0023 100644 (file)
  *
  * CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
  *     clockdomain.  (Currently, this applies to OMAP3 clockdomains only.)
+ * CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
+ *     active whenever the MPU is active.  True for interconnects and
+ *     the WKUP clockdomains.
  */
 #define CLKDM_CAN_FORCE_SLEEP                  (1 << 0)
 #define CLKDM_CAN_FORCE_WAKEUP                 (1 << 1)
 #define CLKDM_CAN_ENABLE_AUTO                  (1 << 2)
 #define CLKDM_CAN_DISABLE_AUTO                 (1 << 3)
 #define CLKDM_NO_AUTODEPS                      (1 << 4)
+#define CLKDM_ACTIVE_WITH_MPU                  (1 << 5)
 
 #define CLKDM_CAN_HWSUP                (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
 #define CLKDM_CAN_SWSUP                (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
index 839145e1cfbea2bcf706e72be39da268e6778247..4972219653ce85b1bebcad5629ab92d733dfef45 100644 (file)
@@ -88,4 +88,5 @@ struct clockdomain wkup_common_clkdm = {
        .name           = "wkup_clkdm",
        .pwrdm          = { .name = "wkup_pwrdm" },
        .dep_bit        = OMAP_EN_WKUP_SHIFT,
+       .flags          = CLKDM_ACTIVE_WITH_MPU,
 };
index c534258474939e8efe3b96dd1c5ed096bd30f3ab..7f2133abe7d36e601c322fcec0a8f6217b500b46 100644 (file)
@@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
        .cm_inst          = OMAP4430_PRM_WKUP_CM_INST,
        .clkdm_offs       = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
        .dep_bit          = OMAP4430_L4WKUP_STATDEP_SHIFT,
-       .flags            = CLKDM_CAN_HWSUP,
+       .flags            = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
 };
 
 static struct clockdomain emu_sys_44xx_clkdm = {
index 773193670ea265cefa67f938fa780e938400dbce..2d710f50fca2fafd9cac99d2f515220c41de410f 100644 (file)
@@ -1124,15 +1124,18 @@ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap
  * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
  * @oh: struct omap_hwmod *
  *
- * If module is marked as SWSUP_SIDLE, force the module out of slave
- * idle; otherwise, configure it for smart-idle.  If module is marked
- * as SWSUP_MSUSPEND, force the module out of master standby;
- * otherwise, configure it for smart-standby.  No return value.
+ * Ensure that the OCP_SYSCONFIG register for the IP block represented
+ * by @oh is set to indicate to the PRCM that the IP block is active.
+ * Usually this means placing the module into smart-idle mode and
+ * smart-standby, but if there is a bug in the automatic idle handling
+ * for the IP block, it may need to be placed into the force-idle or
+ * no-idle variants of these modes.  No return value.
  */
 static void _enable_sysc(struct omap_hwmod *oh)
 {
        u8 idlemode, sf;
        u32 v;
+       bool clkdm_act;
 
        if (!oh->class->sysc)
                return;
@@ -1141,8 +1144,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+               clkdm_act = ((oh->clkdm &&
+                             oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
+                            (oh->_clk && oh->_clk->clkdm &&
+                             oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+               if (clkdm_act && !(oh->class->sysc->idlemodes &
+                                  (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
+                               HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1208,8 +1219,13 @@ static void _idle_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+               /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
+               if (oh->flags & HWMOD_SWSUP_SIDLE ||
+                   !(oh->class->sysc->idlemodes &
+                     (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
index f30e861ce6d9cf42c76d90cb78cfcc90cf99f31b..b7bcba5221ba260b31d0327b37442e52fb807143 100644 (file)
@@ -1928,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1963,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1998,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2033,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -3864,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
 };
 
 /* usb_host_fs -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
        .master         = &omap44xx_usb_host_fs_hwmod,
        .slave          = &omap44xx_l3_main_2_hwmod,
        .clk            = "l3_div_ck",
@@ -3922,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
 };
 
 /* aess -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
        .master         = &omap44xx_aess_hwmod,
        .slave          = &omap44xx_l4_abe_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4013,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
 };
 
 /* l4_abe -> aess */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4031,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
 };
 
 /* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -5857,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
 };
 
 /* l4_cfg -> usb_host_fs */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
        .master         = &omap44xx_l4_cfg_hwmod,
        .slave          = &omap44xx_usb_host_fs_hwmod,
        .clk            = "l4_div_ck",
@@ -6014,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_iva__l3_main_2,
        &omap44xx_l3_main_1__l3_main_2,
        &omap44xx_l4_cfg__l3_main_2,
-       &omap44xx_usb_host_fs__l3_main_2,
+       /* &omap44xx_usb_host_fs__l3_main_2, */
        &omap44xx_usb_host_hs__l3_main_2,
        &omap44xx_usb_otg_hs__l3_main_2,
        &omap44xx_l3_main_1__l3_main_3,
        &omap44xx_l3_main_2__l3_main_3,
        &omap44xx_l4_cfg__l3_main_3,
-       &omap44xx_aess__l4_abe,
+       /* &omap44xx_aess__l4_abe, */
        &omap44xx_dsp__l4_abe,
        &omap44xx_l3_main_1__l4_abe,
        &omap44xx_mpu__l4_abe,
@@ -6029,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_cfg__l4_wkup,
        &omap44xx_mpu__mpu_private,
        &omap44xx_l4_cfg__ocp_wp_noc,
-       &omap44xx_l4_abe__aess,
-       &omap44xx_l4_abe__aess_dma,
+       /* &omap44xx_l4_abe__aess, */
+       /* &omap44xx_l4_abe__aess_dma, */
        &omap44xx_l3_main_2__c2c,
        &omap44xx_l4_wkup__counter_32k,
        &omap44xx_l4_cfg__ctrl_module_core,
@@ -6136,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_per__uart2,
        &omap44xx_l4_per__uart3,
        &omap44xx_l4_per__uart4,
-       &omap44xx_l4_cfg__usb_host_fs,
+       /* &omap44xx_l4_cfg__usb_host_fs, */
        &omap44xx_l4_cfg__usb_host_hs,
        &omap44xx_l4_cfg__usb_otg_hs,
        &omap44xx_l4_cfg__usb_tll_hs,
index 119d5a910f3a4a7ef902b5a2cb4054edc1278d56..43a979075338a76d03894e406952e9d6eb28c1ad 100644 (file)
@@ -32,6 +32,7 @@
 #include "twl-common.h"
 #include "pm.h"
 #include "voltage.h"
+#include "mux.h"
 
 static struct i2c_board_info __initdata pmic_i2c_board_info = {
        .addr           = 0x48,
@@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
                    struct twl6040_platform_data *twl6040_data, int twl6040_irq)
 {
        /* PMIC part*/
+       omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
        strncpy(omap4_i2c1_board_info[0].type, pmic_type,
                sizeof(omap4_i2c1_board_info[0].type));
        omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
index d09da6a746b8a15d212e0e1aabcaeacbf8b480cc..d3de84b0dcbed280a27d12092509f6a9df4e181e 100644 (file)
@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO19_SSP2_SCLK,
        GPIO86_SSP2_RXD,
        GPIO87_SSP2_TXD,
-       GPIO88_GPIO,
+       GPIO88_GPIO | MFP_LPM_DRIVE_HIGH,       /* TSC2046_CS */
+
+       /* BQ24022 Regulator */
+       GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_nCHARGE_EN */
+       GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_ISET2 */
 
        /* HX4700 specific input GPIOs */
        GPIO12_GPIO | WAKEUP_ON_EDGE_RISE,      /* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO14_GPIO,    /* nWLAN_IRQ */
 
        /* HX4700 specific output GPIOs */
+       GPIO61_GPIO | MFP_LPM_DRIVE_HIGH,       /* W3220_nRESET */
+       GPIO71_GPIO | MFP_LPM_DRIVE_HIGH,       /* ASIC3_nRESET */
+       GPIO81_GPIO | MFP_LPM_DRIVE_HIGH,       /* CPU_GP_nRESET */
+       GPIO116_GPIO | MFP_LPM_DRIVE_HIGH,      /* CPU_HW_nRESET */
        GPIO102_GPIO | MFP_LPM_DRIVE_LOW,       /* SYNAPTICS_POWER_ON */
 
        GPIO10_GPIO,    /* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
        { GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
        { GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
        { GPIO32_HX4700_RS232_ON,         GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+       { GPIO61_HX4700_W3220_nRESET,     GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
        { GPIO71_HX4700_ASIC3_nRESET,     GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+       { GPIO81_HX4700_CPU_GP_nRESET,    GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
        { GPIO82_HX4700_EUART_RESET,      GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+       { GPIO116_HX4700_CPU_HW_nRESET,   GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
 };
 
 static void __init hx4700_init(void)
 {
        int ret;
 
+       PCFR = PCFR_GPR_EN | PCFR_OPDE;
+
        pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
        gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
        ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
index 414364eb426cf70d58a439b7e99b15e26c91a0c7..cb2883d553b5e53c320588eb740d435e111d0758 100644 (file)
@@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
 static struct clk s3c2440_clk_ac97 = {
        .name           = "ac97",
        .enable         = s3c2410_clkcon_enable,
-       .ctrlbit        = S3C2440_CLKCON_CAMERA,
+       .ctrlbit        = S3C2440_CLKCON_AC97,
 };
 
 static unsigned long  s3c2440_fclk_n_getrate(struct clk *clk)
index e859fcdb3d58cf7e9d31db02bdecc332ac380125..fde0d23121dc6e14acd614504d054d0c83f778c6 100644 (file)
 #include <mach/common.h>
 #include <mach/emev2.h>
 
+#ifdef CONFIG_ARCH_SH73A0
 #define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
                        of_machine_is_compatible("renesas,sh73a0"))
+#else
+#define is_sh73a0() (0)
+#endif
+
 #define is_r8a7779() machine_is_marzen()
 
 #ifdef CONFIG_ARCH_EMEV2
index 1509a3cb58335246e19ddf7140e58b875b6e4a31..4fd93f5c49ec359f6889f054fe33cd076c7bf822 100644 (file)
@@ -625,11 +625,6 @@ static struct platform_device *snowball_platform_devs[] __initdata = {
        &ab8500_device,
 };
 
-static struct platform_device *snowball_of_platform_devs[] __initdata = {
-       &snowball_led_dev,
-       &snowball_key_dev,
-};
-
 static void __init mop500_init_machine(void)
 {
        struct device *parent = NULL;
@@ -769,6 +764,11 @@ MACHINE_END
 
 #ifdef CONFIG_MACH_UX500_DT
 
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+       &snowball_led_dev,
+       &snowball_key_dev,
+};
+
 struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        /* Requires DMA and call-back bindings. */
        OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
@@ -786,6 +786,8 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
+       /* Requires device name bindings. */
+       OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
        {},
 };
 
index 741e71feca784134e179a11d0d4389fc80b70a32..66e7f00884ab4b4443d51f56c211ae7ac6385f49 100644 (file)
@@ -63,8 +63,10 @@ static void __init ux500_timer_init(void)
 
        /* TODO: Once MTU has been DT:ed place code above into else. */
        if (of_have_populated_dt()) {
+#ifdef CONFIG_OF
                np = of_find_matching_node(NULL, prcmu_timer_of_match);
                if (!np)
+#endif
                        goto dt_fail;
 
                tmp_base = of_iomap(np, 0);
index bec933b04ef04bebcac7d312b46f710b9ce3a9e3..e95bf84cc837550650ccc053e5afc72486d6682a 100644 (file)
@@ -339,7 +339,6 @@ void __init pci_versatile_preinit(void)
 static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq;
-       int devslot = PCI_SLOT(dev->devfn);
 
        /* slot,  pin,  irq
         *  24     1     27
index c471436c79522d7722ba08a2baec7223e4c663d5..2e8a1efdf7b85f3443294b396ec5673496dd635d 100644 (file)
@@ -64,7 +64,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #ifdef CONFIG_ZONE_DMA
 extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
index e5dad60b558b468315294b2c8b95c70193b6f74d..cf4528d5177448fb79cb5dc6689c873c2dbddb4a 100644 (file)
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        }
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+       struct vm_struct *vm;
+
+       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm->addr = (void *)addr;
+       vm->size = SECTION_SIZE;
+       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->caller = pmd_empty_section_gap;
+       vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+       struct vm_struct *vm;
+       unsigned long addr, next = 0;
+       pmd_t *pmd;
+
+       /* we're still single threaded hence no lock needed here */
+       for (vm = vmlist; vm; vm = vm->next) {
+               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               addr = (unsigned long)vm->addr;
+               if (addr < next)
+                       continue;
+
+               /*
+                * Check if this vm starts on an odd section boundary.
+                * If so and the first section entry for this PMD is free
+                * then we block the corresponding virtual address.
+                */
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr);
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr & PMD_MASK);
+               }
+
+               /*
+                * Then check if this vm ends on an odd section boundary.
+                * If so and the second section entry for this PMD is empty
+                * then we block the corresponding virtual address.
+                */
+               addr += vm->size;
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr) + 1;
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr);
+               }
+
+               /* no need to look at any vm entry until we hit the next PMD */
+               next = (addr + PMD_SIZE - 1) & PMD_MASK;
+       }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
        if (mdesc->map_io)
                mdesc->map_io();
+       fill_pmd_gaps();
 
        /*
         * Finally flush the caches and tlb to ensure that we're in a
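
A rough user-space sketch of the section-boundary test used by fill_pmd_gaps() above. SECTION_SIZE and PMD_SIZE are hard-coded to the classic 2-level ARM values (1 MiB sections, 2 MiB PMDs) purely for illustration; they are not taken from the kernel headers.

    #include <stdio.h>

    /* Illustrative values only: 1 MiB sections, a PMD spanning two of them. */
    #define SECTION_SIZE 0x00100000UL
    #define PMD_SIZE     0x00200000UL
    #define PMD_MASK     (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long addrs[] = { 0xf0000000UL, 0xf0100000UL, 0xf0200000UL };

        for (unsigned i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
            unsigned long addr = addrs[i];
            /* Offset equal to SECTION_SIZE means the mapping starts in the
             * second half of its PMD, so the first section entry is unused. */
            int odd = (addr & ~PMD_MASK) == SECTION_SIZE;

            printf("%#lx -> %s half of PMD at %#lx\n",
                   addr, odd ? "second" : "first", addr & PMD_MASK);
        }
        return 0;
    }

An address whose offset inside its PMD equals SECTION_SIZE is exactly the case the new code plugs with a dummy vm entry, so a later ioremap() or vmalloc() can never be handed the unused half of a half-initialized PMD.
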
index 33ecd0c9f0c3ecfdc63aaf44cab40a8deabd5f0c..b1e05ccff3acdb2ff4e5585ea1fb4947ebd18f28 100644 (file)
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
                return -EINVAL;
        }
 
-       if (client->is_ts && adc->ts_pend)
-               return -EAGAIN;
-
        spin_lock_irqsave(&adc->lock, flags);
 
+       if (client->is_ts && adc->ts_pend) {
+               spin_unlock_irqrestore(&adc->lock, flags);
+               return -EAGAIN;
+       }
+
        client->channel = channel;
        client->nr_samples = nr_samples;
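
The hunk above closes a small race by doing the ts_pend test inside adc->lock, so the test and the subsequent claim of the converter are atomic. Below is a tiny pthreads model of that check-then-act pattern; every identifier in it is invented for illustration.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int ts_pend;                     /* models adc->ts_pend */

    /* Test and claim under one lock: two callers can never both see
     * ts_pend == 0, which is what the driver change guarantees. */
    static int start_request(void)
    {
        pthread_mutex_lock(&lock);
        if (ts_pend) {
            pthread_mutex_unlock(&lock);
            return -1;                      /* -EAGAIN in the driver */
        }
        ts_pend = 1;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("first start : %d\n", start_request());  /* 0  */
        printf("second start: %d\n", start_request());  /* -1 */
        return 0;
    }
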
 
index 1d214cb9d77050c921dc7e69f8b165c2eb89ee7e..6303974c2ee06af0e81a4f60437f0fac86d2f4f6 100644 (file)
@@ -126,7 +126,8 @@ struct platform_device s3c_device_adc = {
 #ifdef CONFIG_CPU_S3C2440
 static struct resource s3c_camif_resource[] = {
        [0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
-       [1] = DEFINE_RES_IRQ(IRQ_CAM),
+       [1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
+       [2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
 };
 
 struct platform_device s3c_device_camif = {
index 031a61899beffb07843c198a3237c385e5e2100a..48a1599110372ffb77fcf8e33e89449e4a40b39d 100644 (file)
@@ -37,6 +37,7 @@ struct clk clk_ext_xtal_mux = {
 struct clk clk_xusbxti = {
        .name           = "xusbxti",
        .id             = -1,
+       .rate           = 24000000,
 };
 
 struct clk s5p_clk_27m = {
index a09230a08e02d201e4c2b214a0bd6cabc5a8c223..62ef17676b406274a76ddf5d3a51f7091a9ace97 100644 (file)
@@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
 #define        VMALLOC_END     0xffffffff
 
 #define arch_enter_lazy_cpu_mode()    do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
 #endif /* _H8300_PGTABLE_H */
index 356068cd0879bdf145bd68f12857b75cfd95ab4c..8725d1ad427263895a6e5d904c179e2c1440bbe0 100644 (file)
@@ -100,7 +100,6 @@ extern int __put_user_bad(void);
        break;                                                  \
     default:                                                   \
        __gu_err = __get_user_bad();                            \
-       __gu_val = 0;                                           \
        break;                                                  \
     }                                                          \
     (x) = __gu_val;                                            \
@@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
        return 0;
 }
 
+#define __clear_user   clear_user
+
 #endif /* _H8300_UACCESS_H */
index fca10378701bb9c818b8c123fb4d3c05522cd0c6..5adaadaf92183c42703b9995e32e622dc926d958 100644 (file)
@@ -447,7 +447,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-statis void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
index 32263a138aa6ccf9a2a871a859254f2a18df03d9..e0f74191d55312fe4a8fa6548d95d34667604efe 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/profile.h>
 
 #include <asm/io.h>
+#include <asm/irq_regs.h>
 #include <asm/timer.h>
 
 #define        TICK_SIZE (tick_nsec / 1000)
index ea453532a33c6dfc0eeb49659b2dc9036494713a..075d87acd12ac4158e090b7918250e958f0d8c05 100644 (file)
@@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
 
        /* setup reset gpio used by pci */
        reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
-       if (reset_gpio > 0)
+       if (gpio_is_valid(reset_gpio))
                devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
 
        /* enable auto-switching between PCI and EBU */
@@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
        ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
 
        /* toggle reset pin */
-       if (reset_gpio > 0) {
+       if (gpio_is_valid(reset_gpio)) {
                __gpio_set_value(reset_gpio, 0);
                wmb();
                mdelay(1);
index 55b79ef10028cbc3253685acbf0979edd6784d08..44251b974f1d96d4931e4704d9433163eb070962 100644 (file)
@@ -81,9 +81,6 @@ struct pt_regs {
 #define PTRACE_GETFPREGS          14
 #define PTRACE_SETFPREGS          15
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD     0x00000001
-
 #ifdef __KERNEL__
 
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
index 08251d6f6b11015b5d8d265cba4fe88fc70e0f1c..ac519bbd42ffe32693a02d3f88ac5dbd21d426ff 100644 (file)
@@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_info(struct thread_info *ti);
 #endif
 #define get_thread_info(ti)    get_task_struct((ti)->task)
 #define put_thread_info(ti)    put_task_struct((ti)->task)
index bd4e90dfe6c26d37c366abec8275a1d0d1e7b774..f8e66425cbf826f2f62aef4756869442a9ed5fdc 100644 (file)
@@ -11,7 +11,6 @@
 #ifndef _ASM_TIMEX_H
 #define _ASM_TIMEX_H
 
-#include <asm/hardirq.h>
 #include <unit/timex.h>
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
 extern int init_clockevents(void);
 extern int init_clocksource(void);
 
-static inline void setup_jiffies_interrupt(int irq,
-                                          struct irqaction *action)
-{
-       u16 tmp;
-       setup_irq(irq, action);
-       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
-       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       tmp = GxICR(irq);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
index 69cae0260786207d0c3b62fde6255f6b2f442995..ccce35e3e179150c87f5f94af2a2463333236a68 100644 (file)
@@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
 {
 }
 
+static inline void setup_jiffies_interrupt(int irq,
+                                          struct irqaction *action)
+{
+       u16 tmp;
+       setup_irq(irq, action);
+       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+       tmp = GxICR(irq);
+}
+
 int __init init_clockevents(void)
 {
        struct clock_event_device *cd;
index a5ac755dd69f4acbc8c6d213c47c285b8af41098..2df440105a80f78e111f4f863261cbb7abab10ae 100644 (file)
@@ -9,6 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <linux/irqreturn.h>
+
 struct clocksource;
 struct clock_event_device;
 
index 2381df83bd0064287110896a2cfb0a2678a8a046..35932a8de8b8d299fd7aaebcf31d56eb7c6db6d3 100644 (file)
@@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
        case SC1TXIRQ:
 #ifdef CONFIG_MN10300_TTYSM1_TIMER12
        case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
        case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
        case TM3IRQ:
 #endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
 #endif /* CONFIG_MN10300_TTYSM1 */
index 94a9c6d53e1b890ea98d987628fad089d5fd012d..b900e5afa0aefae7969666fbb1cc9cf2e77d5994 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/bug.h>
 #include <linux/irq.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
index 159acb02cfd4c16dcd1e91f9268b8d9dc6e74463..e244ebe637e15436f643a63e28217d9714321645 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 static unsigned long pci_sram_allocated = 0xbc000000;
index cc18fe7d8b90e2abc9061b430b7072386aeed0d8..c37f9832cf17d0a7cb5b8795766aa4c5feebd88e 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index 43c246439413b8c5f70d9b50d79b733443e42882..53677694b16554af24b8c8262191673426ae193c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/timex.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index 758af30d1a16aad5b74820431e5abda8cc4ed1df..4cefc224f448d98843fe44c7e67631e98fa37466 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index e1becd6b757132bd5664e82c5a676250ad660c3a..bc4adfaf815c1c2276c2f0f18bd84924b10c12d7 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index ddb7ed0107065e19132dc242cd5b42b361c80f21..42f32db75087cc36b2f089ee6cd7c1bd3629e35b 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index 6eb75b80488c8e613af39746fa2cbc8f96888b3c..0554ab062bdc555bdd1d28fb655519b8a39fcbe5 100644 (file)
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable()    __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable()   __mtmsrd(local_paca->kernel_msr, 1)
@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
        return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)  mtmsr(x)
index 1b415027ec0e3c8031c18b5d1dabfeebff054fc5..1f017bb7a7cebfc3278fecfd7abe8f3dfaada107 100644 (file)
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
                __hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+       /*
+        * First we need to hard disable to ensure no interrupt
+        * occurs before we effectively enter the low power state
+        */
+       hard_irq_disable();
+
+       /*
+        * If anything happened while we were soft-disabled,
+        * we return now and do not enter the low power state.
+        */
+       if (lazy_irq_pending())
+               return false;
+
+       /* Tell lockdep we are about to re-enable */
+       trace_hardirqs_on();
+
+       /*
+        * Mark interrupts as soft-enabled and clear the
+        * PACA_IRQ_HARD_DIS from the pending mask since we
+        * are about to hard enable as well as a side effect
+        * of entering the low power state.
+        */
+       local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+       local_paca->soft_enabled = 1;
+
+       /* Tell the caller to enter the low power state */
+       return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
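
prep_irq_for_idle() is consumed further down in this merge by the cell and pseries idle paths. The following user-space model is only a sketch of the calling convention it establishes; all identifiers here are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    static bool irq_pending;                /* models lazy_irq_pending() */

    /* Models prep_irq_for_idle(): a false return means an event is already
     * pending, so the caller must skip the low-power entry and just return,
     * letting the normal interrupt replay path run. */
    static bool prep_for_idle(void)
    {
        if (irq_pending)
            return false;
        /* the real helper also tells lockdep that interrupts are about to be
         * re-enabled and clears the hard-disable bookkeeping */
        return true;
    }

    static void power_save(void)
    {
        if (!prep_for_idle())
            return;                         /* do not nap */
        printf("entering low-power state\n");
    }

    int main(void)
    {
        power_save();                       /* naps */
        irq_pending = true;
        power_save();                       /* skips the nap */
        return 0;
    }
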
index a84aafce2a129e311a0943be0975db3823b88402..a1044f43becd380cdc7216e082fbf267b97a3fae 100644 (file)
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
-       sldi    r0,r0,r4
+       sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
index 3ff9013d6e7914e59f2919f5ee5bf685ad7d7797..ee02b30878ed4bec733af6cbd9aa152eefe22d0f 100644 (file)
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
        case H_PUT_TCE:
                return kvmppc_h_pr_put_tce(vcpu);
        case H_CEDE:
+               vcpu->arch.shared->msr |= MSR_EE;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                vcpu->stat.halt_wakeup++;
index 6e8f677f5646e4c66c801b3bc670c615e880c5b9..1e95556dc692e3702f2df518dd4e08e7770ae52e 100644 (file)
@@ -639,7 +639,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
-       struct assoc_arrays aa;
+       struct assoc_arrays aa = { .arrays = NULL };
 
        n = of_get_drconf_memory(memory, &dm);
        if (!n)
index efdacc829576582a0b4da52384f80ad62082cd4e..d17e98bc0c10d3ff0469cfa8af873f08d57abded 100644 (file)
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
        unsigned long ctrl, thread_switch_control;
 
-       /*
-        * We need to hard disable interrupts, the local_irq_enable() done by
-        * our caller upon return will hard re-enable.
-        */
-       hard_irq_disable();
+       /* Ensure our interrupt state is properly tracked */
+       if (!prep_irq_for_idle())
+               return;
 
        ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
         */
        ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
        mtspr(SPRN_CTRLT, ctrl);
+
+       /* Re-enable interrupts in MSR */
+       __hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
index e61483e8e96083b14d92430bae3f463947635fa8..c71be66bd5dc2ff3bc6e0b3309ad969859cb4b84 100644 (file)
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
        /*
-        * Interrupts are soft-disabled at this point,
-        * but not hard disabled. So an interrupt might have
-        * occurred before entering NAP, and would be potentially
-        * lost (edge events, decrementer events, etc...) unless
-        * we first hard disable then check.
+        * Ensure our interrupt state is properly tracked,
+        * also checks if no interrupt has occurred while we
+        * were soft-disabled
         */
-       hard_irq_disable();
-       if (!lazy_irq_pending())
+       if (prep_irq_for_idle()) {
                cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+               /* Ensure that H_CEDE returns with IRQs on */
+               if (WARN_ON(!(mfmsr() & MSR_EE)))
+                       __hard_irq_enable();
+#endif
+       }
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
index 0f3ab06d222260c63d5866e6d6b7ab0ac643e761..eab3492a45c5c5244eca42d58354cf6d8a092836 100644 (file)
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
                /* print cpus waiting or in xmon */
                printf("cpus stopped:");
                count = 0;
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+               for_each_possible_cpu(cpu) {
                        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                                if (count == 0)
                                        printf(" %x", cpu);
index e136d28d1d2ee5ad97f75866f9e9f3e4d118eba9..4d48f1436a63b72a34f8201ad8cb88416bc1a37d 100644 (file)
@@ -19,9 +19,20 @@ static inline u32 inl(unsigned long addr)
        return -1;
 }
 
-#define outb(x, y)     BUG()
-#define outw(x, y)     BUG()
-#define outl(x, y)     BUG()
+static inline void outb(unsigned char x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outw(unsigned short x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outl(unsigned int x, unsigned long port)
+{
+       BUG();
+}
 
 #define inb_p(addr)    inb(addr)
 #define inw_p(addr)    inw(addr)
index 8832c526cdf92d68cb8b59a48d2a05d8942ab181..c4a0336660dd102d6542b3f3a5721214439dce08 100644 (file)
@@ -2,7 +2,7 @@
 #include <linux/serial_core.h>
 #include <linux/io.h>
 #include <cpu/serial.h>
-#include <asm/gpio.h>
+#include <cpu/gpio.h>
 
 static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
 {
index 9092ce8aa6b472b302ec31d514bb2da71fac3bfe..f8b74ca83b9257a245adaa6a03d05cdb75431c97 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <asm/byteorder.h>
 #include <asm/backtrace.h>
 #include <asm/tile-desc.h>
 #include <arch/abi.h>
@@ -336,8 +337,12 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
                                bytes_to_prefetch / sizeof(tile_bundle_bits);
                }
 
-               /* Decode the next bundle. */
-               bundle.bits = prefetched_bundles[next_bundle++];
+               /*
+                * Decode the next bundle.
+                * TILE always stores instruction bundles in little-endian
+                * mode, even when the chip is running in big-endian mode.
+                */
+               bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
                bundle.num_insns =
                        parse_insn_tile(bundle.bits, pc, bundle.insns);
                num_info_ops = bt_get_info_ops(&bundle, info_operands);
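
The hunk above byte-swaps each fetched bundle because TILE stores instruction bundles little-endian even when the CPU runs big-endian. For reference, a host-order-independent way to read a 64-bit little-endian value, equivalent in effect to the le64_to_cpu() call; the helper name is invented.

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble a 64-bit value from little-endian bytes regardless of the
     * host byte order; in-kernel, le64_to_cpu() gives the same guarantee. */
    static uint64_t read_le64(const uint8_t *p)
    {
        uint64_t v = 0;

        for (int i = 7; i >= 0; i--)
            v = (v << 8) | p[i];
        return v;
    }

    int main(void)
    {
        const uint8_t bundle[8] = { 0xef, 0xbe, 0xad, 0xde, 0, 0, 0, 0 };

        printf("%#llx\n", (unsigned long long)read_le64(bundle)); /* 0xdeadbeef */
        return 0;
    }
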
index 88e466b159dcac92a13d81b5919b0a5487d63830..43b39d61b538698229a23f654b7fb44a335187b3 100644 (file)
@@ -705,7 +705,6 @@ static void stack_proc(void *arg)
        struct task_struct *from = current, *to = arg;
 
        to->thread.saved_task = from;
-       rcu_switch_from(from);
        switch_to(from, to, from);
 }
 
index 7515cf0e1805eae308e1dd0650b1dd33a8d8564d..5db36caf4289cdab3fe04bf899f4826504ced389 100644 (file)
@@ -139,6 +139,19 @@ static int addr_to_vsyscall_nr(unsigned long addr)
        return nr;
 }
 
+#ifdef CONFIG_SECCOMP
+static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
+{
+       if (!seccomp_mode(&tsk->seccomp))
+               return 0;
+       task_pt_regs(tsk)->orig_ax = syscall_nr;
+       task_pt_regs(tsk)->ax = syscall_nr;
+       return __secure_computing(syscall_nr);
+}
+#else
+#define vsyscall_seccomp(_tsk, _nr) 0
+#endif
+
 static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
        /*
@@ -174,6 +187,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        int vsyscall_nr;
        int prev_sig_on_uaccess_error;
        long ret;
+       int skip;
 
        /*
         * No point in checking CS -- the only way to get here is a user mode
@@ -205,9 +219,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        }
 
        tsk = current;
-       if (seccomp_mode(&tsk->seccomp))
-               do_exit(SIGKILL);
-
        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
@@ -222,8 +233,13 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
         * address 0".
         */
        ret = -EFAULT;
+       skip = 0;
        switch (vsyscall_nr) {
        case 0:
+               skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
                        break;
@@ -234,6 +250,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 1:
+               skip = vsyscall_seccomp(tsk, __NR_time);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(time_t)))
                        break;
 
@@ -241,6 +261,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 2:
+               skip = vsyscall_seccomp(tsk, __NR_getcpu);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned)))
                        break;
@@ -253,6 +277,12 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
 
+       if (skip) {
+               if ((long)regs->ax <= 0L) /* seccomp errno emulation */
+                       goto do_ret;
+               goto done; /* seccomp trace/trap */
+       }
+
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
@@ -271,10 +301,11 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        regs->ax = ret;
 
+do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
-
+done:
        return true;
 
 sigsegv:
index be3cea4407ffad63824c068332079baf16336012..57e168e27b5b865e187d5ee3d34413189a1c5905 100644 (file)
@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 {
        struct kvm_mmu_page *page;
 
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               return;
+
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
index 9b306e550e3f06ddfa342c041c7793543702c4b3..2c8d6a3d250afd2d18fa8b41b03dc26b0ad97ca2 100644 (file)
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
 
        /* Don't leak any random bits. */
 
-       memset(elfregs, 0, sizeof (elfregs));
+       memset(elfregs, 0, sizeof(*elfregs));
 
        /* Note:  PS.EXCM is not set while user task is running; its
         * being set in regs->ps is for exception handling convenience.
index 02cf6335e9bdc5bb940ec89fcdbfe63746f9b5df..e7dee617358e810aec57ff25162ff95604fabb3c 100644 (file)
@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 
                blkg->pd[i] = pd;
                pd->blkg = blkg;
-       }
-
-       /* invoke per-policy init */
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
 
+               /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-       struct request_queue *q = blkg->q;
        struct blkcg *blkcg = blkg->blkcg;
 
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
        /* Something wrong if we are trying to remove same group twice */
index 3c923a7aeb56f1658142b091868c3c29ebffd3c5..93eb3e4f88ce78affc79c5ca6d8b7c7b0759be5a 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+       int i;
+
        while (true) {
                bool drain = false;
-               int i;
 
                spin_lock_irq(q->queue_lock);
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        break;
                msleep(10);
        }
+
+       /*
+        * With queue marked dead, any woken up waiter will fail the
+        * allocation path, so the wakeup chaining is lost and we're
+        * left with hung waiters. We need to wake up those waiters.
+        */
+       if (q->request_fn) {
+               spin_lock_irq(q->queue_lock);
+               for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+                       wake_up_all(&q->rq.wait[i]);
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 
 /**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
        /* mark @q DEAD, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
        spin_lock_irq(lock);
 
        /*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-       if (q->queue_lock != &q->__queue_lock)
-               q->queue_lock = &q->__queue_lock;
-
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
+       spin_lock_irq(lock);
+       if (q->queue_lock != &q->__queue_lock)
+               q->queue_lock = &q->__queue_lock;
+       spin_unlock_irq(lock);
+
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
index 780354888958cd7ea03aef2c1654b325bc9fb24e..6e4744cbfb56b4ca0d99062a0d9b0437c894016d 100644 (file)
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
                mod_timer(&q->timeout, expiry);
 }
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:     pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-       unsigned long flags;
-       struct request *rq, *tmp;
-       LIST_HEAD(list);
-
-       /*
-        * Not a request based block device, nothing to abort
-        */
-       if (!q->request_fn)
-               return;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       elv_abort_queue(q);
-
-       /*
-        * Splice entries to local list, to avoid deadlocking if entries
-        * get readded to the timeout list by error handling
-        */
-       list_splice_init(&q->timeout_list, &list);
-
-       list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-               blk_abort_request(rq);
-
-       /*
-        * Occasionally, blk_abort_request() will return without
-        * deleting the element from the list. Make sure we add those back
-        * instead of leaving them on the local stack list.
-        */
-       list_splice(&list, &q->timeout_list);
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
index 673c977cc2bfa238e0fe0efa6dddac5193fbccd8..fb52df9744f5fe411908162fbb02257ca0bc097a 100644 (file)
@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }
 
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
        return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
        kfree(cfqd->root_group);
 #endif
-       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
        kfree(cfqd);
 }
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        if (!cfq_group_idle)
                cfq_group_idle = 1;
-#else
-               cfq_group_idle = 0;
-#endif
 
        ret = blkcg_policy_register(&blkcg_policy_cfq);
        if (ret)
                return ret;
+#else
+       cfq_group_idle = 0;
+#endif
 
+       ret = -ENOMEM;
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
        kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        elv_unregister(&iosched_cfq);
        kmem_cache_destroy(cfq_pool);
 }
index 260fa80ef5750f80f4ebc1415d06abc992e18b8a..9a87daa6f4fbd10202ea9d47ae1a549c806a6fb4 100644 (file)
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
                break;
        }
 
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
        /* In particular, rule out all resets and host-specific ioctls.  */
        printk_ratelimited(KERN_WARNING
                           "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-       return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+       return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 
index 0ed85cac32314f956de3092f7087e3ab011b5ba7..615996a36bedcef10f63bc929b21b3861c9781c6 100644 (file)
@@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
                return_ACPI_STATUS(status);
        }
 
-       if (sleep_state != ACPI_STATE_S5) {
-               /*
-                * Disable BM arbitration. This feature is contained within an
-                * optional register (PM2 Control), so ignore a BAD_ADDRESS
-                * exception.
-                */
-               status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-               if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-                       return_ACPI_STATUS(status);
-               }
-       }
-
        /*
         * 1) Disable/Clear all GPEs
         * 2) Enable all wakeup GPEs
@@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
                                    [ACPI_EVENT_POWER_BUTTON].
                                    status_register_id, ACPI_CLEAR_STATUS);
 
-       /*
-        * Enable BM arbitration. This feature is contained within an
-        * optional register (PM2 Control), so ignore a BAD_ADDRESS
-        * exception.
-        */
-       status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
-       if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-               return_ACPI_STATUS(status);
-       }
-
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
        return_ACPI_STATUS(status);
 }
index 23ce096864186a1cfd6ced9e57fd17d208646004..fe6626035495bb398fbf2d7fc7097d195a916d29 100644 (file)
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
                        /* Create the new outer package and populate it */
 
                        status =
-                           acpi_ns_wrap_with_package(data, *elements,
+                           acpi_ns_wrap_with_package(data, return_object,
                                                      return_object_ptr);
                        if (ACPI_FAILURE(status)) {
                                return (status);
index c850de4c9a146883a7d91f16c8cd8ddc3b694940..eff722278ff539535bec85d5f5f6f4f2b04b79bf 100644 (file)
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always return 0 for CPU0's handle.
+                * Ignores apic_id and always returns 0 for the processor
+                * handle with acpi id 0 if nr_cpu_ids is 1.
+                * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
                 */
-               if (acpi_id == 0)
+               if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
                        return apic_id;
index b5c5ff53cb57f74e89cc59bad8ca6e29df5e1db5..fcb956bb4b4c525875f961e7c2398b20c35723f4 100644 (file)
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
                first_word = 0;
                spin_lock_irq(&b->bm_lock);
        }
-
        /* last page (respectively only page, for first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
-       bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+       /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+        * ==> e = 32767, el = 32768, last_page = 2,
+        * and now last_word = 0.
+        * We do not want to touch last_page in this case,
+        * as we did not allocate it, it is not present in bitmap->bm_pages.
+        */
+       if (last_word)
+               bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
 
        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
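
The arithmetic behind the new last_word guard, worked through with the values from the comment above. A 4 KiB bitmap page holding 64-bit words is assumed for illustration, and the explicit shift and modulo stand in for what LN2_BPL and MLPP() do in the driver.

    #include <stdio.h>

    int main(void)
    {
        unsigned long bm_bits = 32768;            /* from the comment above */
        unsigned long e  = bm_bits - 1;           /* last bit to set: 32767 */
        unsigned long el = e + 1;                 /* 32768 */
        unsigned long words_per_page = 4096 / 8;  /* assumed 4 KiB page, 64-bit longs */

        /* bit number -> 64-bit word index, then word index within its page */
        unsigned long last_word = (el >> 6) % words_per_page;

        printf("last_word = %lu\n", last_word);   /* prints 0 */
        return 0;
    }

A last_word of 0 means there is nothing to write into the (unallocated) last page, which is precisely the case the added check skips.
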
index 9c5c84946b056792fa45d99051bd5c28750db7e1..8e93a6ac9bb65e3497f53c92723a67429ffdf946 100644 (file)
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+               if (req->rq_state & RQ_LOCAL_ABORTED) {
+                       _req_may_be_done(req, m);
+                       break;
+               }
 
                __drbd_chk_io_error(mdev, false);
 
        goto_queue_for_net_read:
 
+               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+       int congested = 0;
+
+       /* If I don't even have good local storage, we can not reasonably try
+        * to pull ahead of the peer. We also need the local reference to make
+        * sure mdev->act_log is there.
+        * Note: caller has to make sure that net_conf is there.
+        */
+       if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+               return;
+
+       if (mdev->net_conf->cong_fill &&
+           atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+               dev_info(DEV, "Congestion-fill threshold reached\n");
+               congested = 1;
+       }
+
+       if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+               dev_info(DEV, "Congestion-extents threshold reached\n");
+               congested = 1;
+       }
+
+       if (congested) {
+               queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+               if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+                       _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+               else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+       }
+       put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
                _req_mod(req, queue_for_send_oos);
 
        if (remote &&
-           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-               int congested = 0;
-
-               if (mdev->net_conf->cong_fill &&
-                   atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-                       dev_info(DEV, "Congestion-fill threshold reached\n");
-                       congested = 1;
-               }
-
-               if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-                       dev_info(DEV, "Congestion-extents threshold reached\n");
-                       congested = 1;
-               }
-
-               if (congested) {
-                       queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-                       if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-                               _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-                       else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-                               _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-               }
-       }
+           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+               maybe_pull_ahead(mdev);
 
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
index cce7df367b793d111ef875bd5cd30a6f5e08782c..553f43a90953cab40f421658ecb37fa9c20c8c54 100644 (file)
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
+       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
index bbca966f8f66a20b04f62ac01ff41cd5ef18c037..3bba65510d23afdf39070f7d23552e8e15e27af2 100644 (file)
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
        struct gendisk *disk;
        int err;
 
+       err = -ENOMEM;
        lo = kzalloc(sizeof(*lo), GFP_KERNEL);
-       if (!lo) {
-               err = -ENOMEM;
+       if (!lo)
                goto out;
-       }
 
-       err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
-       if (err < 0)
+       if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
                goto out_free_dev;
 
        if (i >= 0) {
index 264bc77dcb911c7030c787d3ad87cd79b654ce81..a8fddeb3d638ebcdf9a0191beda816d8f1338ccd 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;
 
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }
 
 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *     The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
-       u32 group_allocated;
        struct driver_data *dd = dev_to_disk(dev)->private_data;
        int size = 0;
+
+       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "thermal_shutdown\n");
+       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "write_protect\n");
+       else
+               size += sprintf(buf, "%s", "online\n");
+
+       return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
+{
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       u32 group_allocated;
+       int size = *offset;
        int n;
 
-       size += sprintf(&buf[size], "Hardware\n--------\n");
-       size += sprintf(&buf[size], "S ACTive      : [ 0x");
+       if (!len || size)
+               return 0;
+
+       if (size < 0)
+               return -EINVAL;
+
+       size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                         readl(dd->port->s_active[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Command Issue : [ 0x");
+       size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                        readl(dd->port->cmd_issue[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Completed     : [ 0x");
+       size += sprintf(&buf[size], "H/ Completed     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                readl(dd->port->completed[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->port->mmio + PORT_IRQ_STAT));
-       size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->mmio + HOST_IRQ_STAT));
        size += sprintf(&buf[size], "\n");
 
-       size += sprintf(&buf[size], "Local\n-----\n");
-       size += sprintf(&buf[size], "Allocated    : [ 0x");
+       size += sprintf(&buf[size], "L/ Allocated     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "Commands in Q: [ 0x");
+       size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static ssize_t mtip_hw_show_status(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
 {
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       int size = *offset;
 
-       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "thermal_shutdown\n");
-       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "write_protect\n");
-       else
-               size += sprintf(buf, "%s", "online\n");
-
-       return size;
-}
+       if (!len || size)
+               return 0;
 
-static ssize_t mtip_hw_show_flags(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       if (size < 0)
+               return -EINVAL;
 
-       size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
                                                        dd->port->flags);
-       size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
                                                        dd->dd_flag);
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_registers,
+       .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_flags,
+       .llseek = no_llseek,
+};
 
 /*
  * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'registers' sysfs entry\n");
        if (sysfs_create_file(kobj, &dev_attr_status.attr))
                dev_warn(&dd->pdev->dev,
                        "Error creating 'status' sysfs entry\n");
-       if (sysfs_create_file(kobj, &dev_attr_flags.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'flags' sysfs entry\n");
        return 0;
 }
 
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       sysfs_remove_file(kobj, &dev_attr_registers.attr);
        sysfs_remove_file(kobj, &dev_attr_status.attr);
-       sysfs_remove_file(kobj, &dev_attr_flags.attr);
 
        return 0;
 }
 
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+       if (!dfs_parent)
+               return -1;
+
+       dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+       if (IS_ERR_OR_NULL(dd->dfs_node)) {
+               dev_warn(&dd->pdev->dev,
+                       "Error creating node %s under debugfs\n",
+                                               dd->disk->disk_name);
+               dd->dfs_node = NULL;
+               return -1;
+       }
+
+       debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_flags_fops);
+       debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_regs_fops);
+
+       return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+       debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3730,6 +3784,7 @@ skip_create_disk:
                mtip_hw_sysfs_init(dd, kobj);
                kobject_put(kobj);
        }
+       mtip_hw_debugfs_init(dd);
 
        if (dd->mtip_svc_handler) {
                set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
        return rv;
 
 kthread_run_error:
+       mtip_hw_debugfs_exit(dd);
+
        /* Delete our gendisk. This also removes the device from /dev */
        del_gendisk(dd->disk);
 
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
                        kobject_put(kobj);
                }
        }
+       mtip_hw_debugfs_exit(dd);
 
        /*
         * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
        }
        mtip_major = error;
 
+       if (!dfs_parent) {
+               dfs_parent = debugfs_create_dir("rssd", NULL);
+               if (IS_ERR_OR_NULL(dfs_parent)) {
+                       printk(KERN_WARNING "Error creating debugfs parent\n");
+                       dfs_parent = NULL;
+               }
+       }
+
        /* Register our PCI operations. */
        error = pci_register_driver(&mtip_pci_driver);
-       if (error)
+       if (error) {
+               debugfs_remove(dfs_parent);
                unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+       }
 
        return error;
 }
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+       debugfs_remove_recursive(dfs_parent);
+
        /* Release the allocated major block device number. */
        unregister_blkdev(mtip_major, MTIP_DRV_NAME);
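
The new debugfs read handlers above format the whole report into a fixed on-stack
buffer, clamp the result to the caller's length, and only then copy it out. Below is
a minimal userspace sketch of that clamp-and-copy shape; memcpy() stands in for
copy_to_user(), and the buffer size and flag values are made up.

/* Sketch of the bounded "format, clamp, copy" pattern used by
 * mtip_hw_read_flags()/mtip_hw_read_registers() above. */
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 1024		/* plays the role of MTIP_DFS_MAX_BUF_SIZE */

/* Format two flag words into buf[], then copy at most len bytes to dst.
 * Returns the number of bytes copied, mirroring a debugfs ->read(). */
static long read_flags(char *dst, size_t len, long *offset,
                       unsigned long port_flags, unsigned long dd_flag)
{
        char buf[BUF_SIZE];
        int size = 0;

        if (!len || *offset)            /* nothing asked for, or already read */
                return 0;

        size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", port_flags);
        size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n", dd_flag);

        *offset = ((size_t)size <= len) ? (long)size : (long)len;
        memcpy(dst, buf, (size_t)*offset);      /* kernel code: copy_to_user() */
        return *offset;
}

int main(void)
{
        char out[BUF_SIZE];
        long off = 0;
        long n = read_flags(out, sizeof(out), &off, 0x12UL, 0x400UL);

        fwrite(out, 1, (size_t)n, stdout);
        return 0;
}
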
 
index b2c88da26b2a7b7f94ff77b6f1a1d2047f6b45f3..f51fc23d17bb0e0025c74ac9f495007dcbc8b784 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>
 
 /* Offset of Subsystem Device ID in PCI configuration space */
 #define PCI_SUBSYSTEM_DEVICEID 0x2E
  #define dbg_printk(format, arg...)
 #endif
 
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)
 
 enum {
@@ -447,6 +448,8 @@ struct driver_data {
        unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
 
        struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+       struct dentry *dfs_node;
 };
 
 #endif
index aa2712060bfbcd153e446f5adcf80a9d61377e75..9a72277a31df0cb131f879bb8a1503a65a57b7cc 100644 (file)
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
        }
 }
 
+struct mm_plug_cb {
+       struct blk_plug_cb cb;
+       struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+       struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+       spin_lock_irq(&mmcb->card->lock);
+       activate(mmcb->card);
+       spin_unlock_irq(&mmcb->card->lock);
+       kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+       struct blk_plug *plug = current->plug;
+       struct mm_plug_cb *mmcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+               if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+                       return 1;
+       }
+       /* Not currently on the callback list */
+       mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+       if (!mmcb)
+               return 0;
+
+       mmcb->card = card;
+       mmcb->cb.callback = mm_unplug;
+       list_add(&mmcb->cb.list, &plug->cb_list);
+       return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
+       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+               activate(card);
        spin_unlock_irq(&card->lock);
 
        return;
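
mm_check_plugged() queues an unplug callback on the current task's block plug at
most once per card: it searches the plug's callback list first and only allocates a
new entry if nothing matches. The standalone sketch below models that
search-then-register step with a plain singly linked list; the struct names are
illustrative, and the kernel version uses struct blk_plug_cb, container_of() and
list_for_each_entry() as shown above.

/* Toy model of mm_check_plugged(): register unplug_cb for a card at most once. */
#include <stdio.h>
#include <stdlib.h>

struct card { int id; };

struct plug_cb {
        struct plug_cb *next;
        void (*callback)(struct plug_cb *cb);
        struct card *card;
};

struct plug { struct plug_cb *cb_list; };

static void unplug_cb(struct plug_cb *cb)
{
        printf("unplug card %d\n", cb->card->id);
}

/* Return 1 if a callback for this card is (now) queued on the plug. */
static int check_plugged(struct plug *plug, struct card *card)
{
        struct plug_cb *cb;

        if (!plug)
                return 0;

        for (cb = plug->cb_list; cb; cb = cb->next)
                if (cb->callback == unplug_cb && cb->card == card)
                        return 1;       /* already registered, nothing to do */

        cb = malloc(sizeof(*cb));
        if (!cb)
                return 0;               /* caller falls back to activating now */

        cb->callback = unplug_cb;
        cb->card = card;
        cb->next = plug->cb_list;
        plug->cb_list = cb;
        return 1;
}

int main(void)
{
        struct plug plug = { .cb_list = NULL };
        struct card card = { .id = 7 };

        printf("first:  %d\n", check_plugged(&plug, &card));
        printf("second: %d\n", check_plugged(&plug, &card)); /* no 2nd alloc */
        unplug_cb(plug.cb_list);        /* roughly what blk_finish_plug() triggers */
        free(plug.cb_list);
        return 0;
}
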
index 773cf27dc23fc595d6fca48ea8ec6c4992438d18..9ad3b5ec1dc1c521085db47a7928cc8cf1179701 100644 (file)
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
index 60eed4bdd2e4528ae3c3b8871cd65f85d3c34952..e4fb3374dcd2aaa6d0f834cd9394564a3c022a38 100644 (file)
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
        return free;
 }
 
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
 {
+       if (info->shadow[id].req.u.rw.id != id)
+               return -EINVAL;
+       if (info->shadow[id].request == NULL)
+               return -EINVAL;
        info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
+       return 0;
 }
 
+static const char *op_name(int op)
+{
+       static const char *const names[] = {
+               [BLKIF_OP_READ] = "read",
+               [BLKIF_OP_WRITE] = "write",
+               [BLKIF_OP_WRITE_BARRIER] = "barrier",
+               [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+               [BLKIF_OP_DISCARD] = "discard" };
+
+       if (op < 0 || op >= ARRAY_SIZE(names))
+               return "unknown";
+
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
        unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
+               /*
+                * The backend has messed up and given us an id that we would
+                * never have given to it (we stamp it up to BLK_RING_SIZE -
+                * look in get_id_from_freelist).
+                */
+               if (id >= BLK_RING_SIZE) {
+                       WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       /* We can't safely get the 'struct request' as
+                        * the id is busted. */
+                       continue;
+               }
                req  = info->shadow[id].request;
 
                if (bret->operation != BLKIF_OP_DISCARD)
                        blkif_completion(&info->shadow[id]);
 
-               add_id_to_freelist(info, id);
+               if (add_id_to_freelist(info, id)) {
+                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       continue;
+               }
 
                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
-                               printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-                                          info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                          info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-                               printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
-                               printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(error)) {
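
op_name() maps a backend-supplied opcode to a printable name while tolerating both
out-of-range values and holes in the table, so the new WARN and printk messages can
never index past the array. The same lookup in isolation; the numeric opcode values
below mirror the usual blkif numbering but should be treated as assumptions.

/* Standalone version of the defensive op_name() lookup above. */
#include <stdio.h>

enum {
        OP_READ    = 0,
        OP_WRITE   = 1,
        OP_BARRIER = 2,
        OP_FLUSH   = 3,
        /* 4 is unused, so names[4] stays NULL */
        OP_DISCARD = 5,
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *op_name(int op)
{
        static const char *const names[] = {
                [OP_READ]    = "read",
                [OP_WRITE]   = "write",
                [OP_BARRIER] = "barrier",
                [OP_FLUSH]   = "flush",
                [OP_DISCARD] = "discard",
        };

        if (op < 0 || op >= (int)ARRAY_SIZE(names))
                return "unknown";       /* value outside the table */
        if (!names[op])
                return "reserved";      /* hole inside the table */
        return names[op];
}

int main(void)
{
        printf("%s %s %s\n", op_name(OP_DISCARD), op_name(4), op_name(42));
        return 0;
}
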
index dcbe05616090de6d5a830cc8a5950fb282820eee..9a1eb0cfa95f3f0a00a7334f3b276bf623d88cf6 100644 (file)
@@ -1067,26 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 
        old_parent = clk->parent;
 
-       /* find index of new parent clock using cached parent ptrs */
-       if (clk->parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (clk->parents[i] == parent)
-                               break;
-       else
+       if (!clk->parents)
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                                                GFP_KERNEL);
 
        /*
-        * find index of new parent clock using string name comparison
-        * also try to cache the parent to avoid future calls to __clk_lookup
+        * find index of new parent clock using cached parent ptrs,
+        * or if not yet cached, use string name comparison and cache
+        * them now to avoid future calls to __clk_lookup.
         */
-       if (i == clk->num_parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (!strcmp(clk->parent_names[i], parent->name)) {
-                               if (clk->parents)
-                                       clk->parents[i] = __clk_lookup(parent->name);
-                               break;
-                       }
+       for (i = 0; i < clk->num_parents; i++) {
+               if (clk->parents && clk->parents[i] == parent)
+                       break;
+               else if (!strcmp(clk->parent_names[i], parent->name)) {
+                       if (clk->parents)
+                               clk->parents[i] = __clk_lookup(parent->name);
+                       break;
+               }
+       }
 
        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
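
The clk core change above folds the two parent searches into one loop: match on the
cached pointer when it is there, otherwise match by name and fill the cache for next
time. A toy standalone version of that lookup follows, with the parent pointer
itself standing in for __clk_lookup() and a made-up struct clk.

/* Sketch of the merged parent search in __clk_set_parent(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct clk {
        const char *name;
        int num_parents;
        const char *const *parent_names;
        struct clk **parents;           /* lazily allocated pointer cache */
};

static int find_parent_index(struct clk *clk, struct clk *parent)
{
        int i;

        if (!clk->parents)
                clk->parents = calloc(clk->num_parents, sizeof(*clk->parents));

        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents && clk->parents[i] == parent)
                        break;                          /* cached pointer hit */
                if (!strcmp(clk->parent_names[i], parent->name)) {
                        if (clk->parents)
                                clk->parents[i] = parent;       /* cache it */
                        break;
                }
        }
        return i;       /* == num_parents means "not a possible parent" */
}

int main(void)
{
        static const char *const pnames[] = { "osc", "pll1", "pll2" };
        struct clk pll2  = { .name = "pll2" };
        struct clk child = { .name = "uart", .num_parents = 3,
                             .parent_names = pnames };

        printf("%d\n", find_parent_index(&child, &pll2));       /* 2, via name */
        printf("%d\n", find_parent_index(&child, &pll2));       /* 2, via cache */
        free(child.parents);
        return 0;
}
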
index c4067d0141f7c083ea9de58c72dc0ea3122ca300..542f0c04b6958037b3cf1c468e7705f779f8e11b 100644 (file)
@@ -136,7 +136,7 @@ config GPIO_MPC8XXX
 
 config GPIO_MSM_V1
        tristate "Qualcomm MSM GPIO v1"
-       depends on GPIOLIB && ARCH_MSM
+       depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
        help
          Say yes here to support the GPIO interface on ARM v6 based
          Qualcomm MSM chips.  Most of the pins on the MSM can be
index 9e9947cb86a3d4fe29bb018987176275d4476e2c..1077754f8289e37ca639541e8d9dd5936c858e7c 100644 (file)
@@ -98,6 +98,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
 
        return 0;
 }
+EXPORT_SYMBOL(devm_gpio_request_one);
 
 /**
  *      devm_gpio_free - free an interrupt
index c337143b18f8f97d1fa80c61d84cfe9149b75aff..c89c4c1e668d97cf170f4fc07dc5911d83cb52d9 100644 (file)
@@ -398,10 +398,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
        writel(~0, port->base + GPIO_ISR);
 
        if (mxc_gpio_hwtype == IMX21_GPIO) {
-               /* setup one handler for all GPIO interrupts */
-               if (pdev->id == 0)
-                       irq_set_chained_handler(port->irq,
-                                               mx2_gpio_irq_handler);
+               /*
+                * Setup one handler for all GPIO interrupts. Actually setting
+                * the handler is needed only once, but doing it for every port
+                * is more robust and easier.
+                */
+               irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
        } else {
                /* setup one handler for each entry */
                irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
index c4ed1722734c9eadb190c0ccaa19a9c2eb3b2d39..4fbc208c32cfa213812f6356323a36a51c37087c 100644 (file)
@@ -174,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
        if (bank->dbck_enable_mask && !bank->dbck_enabled) {
                clk_enable(bank->dbck);
                bank->dbck_enabled = true;
+
+               __raw_writel(bank->dbck_enable_mask,
+                            bank->base + bank->regs->debounce_en);
        }
 }
 
 static inline void _gpio_dbck_disable(struct gpio_bank *bank)
 {
        if (bank->dbck_enable_mask && bank->dbck_enabled) {
+               /*
+                * Disable debounce before cutting its clock. If debounce is
+                * enabled but the clock is not, the GPIO module seems unable
+                * to detect events and generate interrupts, at least on OMAP3.
+                */
+               __raw_writel(0, bank->base + bank->regs->debounce_en);
+
                clk_disable(bank->dbck);
                bank->dbck_enabled = false;
        }
@@ -1081,7 +1091,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        bank->is_mpuio = pdata->is_mpuio;
        bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
        bank->loses_context = pdata->loses_context;
-       bank->get_context_loss_count = pdata->get_context_loss_count;
        bank->regs = pdata->regs;
 #ifdef CONFIG_OF_GPIO
        bank->chip.of_node = of_node_get(node);
@@ -1135,6 +1144,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        omap_gpio_chip_init(bank);
        omap_gpio_show_rev(bank);
 
+       if (bank->loses_context)
+               bank->get_context_loss_count = pdata->get_context_loss_count;
+
        pm_runtime_put(bank->dev);
 
        list_add_tail(&bank->node, &omap_gpio_list);
index 38416be8ba1186c41b90b7dfd5df070ad3b7acee..6064fb376e11638f8900c6b6cfc9282900fff5ec 100644 (file)
@@ -383,8 +383,9 @@ static int __devinit gsta_probe(struct platform_device *dev)
        }
        spin_lock_init(&chip->lock);
        gsta_gpio_setup(chip);
-       for (i = 0; i < GSTA_NR_GPIO; i++)
-               gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+       if (gpio_pdata)
+               for (i = 0; i < GSTA_NR_GPIO; i++)
+                       gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
 
        /* 384 was used in previous code: be compatible for other drivers */
        err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
index c1ad2884f2edb0b79f3894a9a16662e5776bda51..11f29c82253c4af5fca18b09f26923a8aa3a48a3 100644 (file)
@@ -149,6 +149,9 @@ static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
        tps65910_gpio->gpio_chip.set    = tps65910_gpio_set;
        tps65910_gpio->gpio_chip.get    = tps65910_gpio_get;
        tps65910_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+       tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
+#endif
        if (pdata && pdata->gpio_base)
                tps65910_gpio->gpio_chip.base = pdata->gpio_base;
        else
index 92ea5350dfe96dd6907ea235bb55b2d25faac08c..aa61ad2fcaaa6573e1de350a82d08151c0040289 100644 (file)
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
        struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
        struct wm8994 *wm8994 = wm8994_gpio->wm8994;
 
+       if (value)
+               value = WM8994_GPN_LVL;
+
        return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
-                              WM8994_GPN_DIR, 0);
+                              WM8994_GPN_DIR | WM8994_GPN_LVL, value);
 }
 
 static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
index 5873e481e5d2dadde3c16e0e0883d8e6e3340dbe..a8743c399e83234c976ebdb4b471542a0645c42d 100644 (file)
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
        return true;
 }
 
+static bool valid_inferred_mode(const struct drm_connector *connector,
+                               const struct drm_display_mode *mode)
+{
+       struct drm_display_mode *m;
+       bool ok = false;
+
+       list_for_each_entry(m, &connector->probed_modes, head) {
+               if (mode->hdisplay == m->hdisplay &&
+                   mode->vdisplay == m->vdisplay &&
+                   drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+                       return false; /* duplicated */
+               if (mode->hdisplay <= m->hdisplay &&
+                   mode->vdisplay <= m->vdisplay)
+                       ok = true;
+       }
+       return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
        struct drm_device *dev = connector->dev;
 
        for (i = 0; i < drm_num_dmt_modes; i++) {
-               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+                   valid_inferred_mode(connector, drm_dmt_modes + i)) {
                        newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
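
valid_inferred_mode() only lets a DMT/GTF/CVT candidate through if it is not an
exact duplicate of an already probed mode and is no larger than at least one probed
mode. The same rule restated over a plain array; the struct and the sample timings
are illustrative.

/* Standalone restatement of the valid_inferred_mode() filter above. */
#include <stdio.h>
#include <stdbool.h>

struct mode { int hdisplay, vdisplay, vrefresh; };

static bool valid_inferred_mode(const struct mode *probed, int nprobed,
                                const struct mode *mode)
{
        bool ok = false;
        int i;

        for (i = 0; i < nprobed; i++) {
                const struct mode *m = &probed[i];

                if (mode->hdisplay == m->hdisplay &&
                    mode->vdisplay == m->vdisplay &&
                    mode->vrefresh == m->vrefresh)
                        return false;           /* duplicate of a probed mode */
                if (mode->hdisplay <= m->hdisplay &&
                    mode->vdisplay <= m->vdisplay)
                        ok = true;              /* fits under a probed mode */
        }
        return ok;
}

int main(void)
{
        struct mode probed[] = { { 1920, 1080, 60 } };
        struct mode dup      = { 1920, 1080, 60 };
        struct mode smaller  = { 1280,  720, 60 };
        struct mode bigger   = { 2560, 1440, 60 };

        printf("%d %d %d\n",
               valid_inferred_mode(probed, 1, &dup),            /* 0 */
               valid_inferred_mode(probed, 1, &smaller),        /* 1 */
               valid_inferred_mode(probed, 1, &bigger));        /* 0 */
        return 0;
}
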
index f94792626b94fadae47303f150e7aff278d371ef..36822b924eb12974f7c73af8fe8368707ab08abf 100644 (file)
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
        }
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->dev->pdev;
+       bool primary;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = dev_priv->dev->agp->base;
+       ap->ranges[0].size =
+               dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
+               goto put_bridge;
+       }
+
+       i915_kick_out_firmware_fb(dev_priv);
+
        pci_set_master(dev->pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto out_rmmap;
-       }
-
        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
        dev_priv->mm.gtt_mapping =
index 59d44937dd9fcc9f1c4350bc590950d5be75175f..84b648a7ddd8cf697fc07fd00a1e2eed39ff46bd 100644 (file)
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
        rdev->vm_manager.enabled = false;
 
        /* mark first vm as always in use, it's the system one */
+       /* allocate enough for 2 full VM pts */
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                     rdev->vm_manager.max_pfn * 8,
+                                     rdev->vm_manager.max_pfn * 8 * 2,
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
                dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
-       vm->last_pfn = 0;
+       /* SI requires equal sized PTs for all VMs, so always set
+        * last_pfn to max_pfn.  cayman allows variable sized
+        * pts so we can grow them as needed.  Once we switch
+        * to two level pts we can unify this again.
+        */
+       if (rdev->family >= CHIP_TAHITI)
+               vm->last_pfn = rdev->vm_manager.max_pfn;
+       else
+               vm->last_pfn = 0;
        /* map the ib pool buffer at 0 in virtual address space, set
         * read only
         */
index f28bd4b7ef980937c88eb54c30b5534adc5abf3b..21ec9f5653cedc94e729e521e8cf62e4b14c2884 100644 (file)
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
-       if (robj->rdev->asic->ioctl_wait_idle)
-               robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+       if (rdev->asic->ioctl_wait_idle)
+               robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
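
Both ioctls now take rdev from dev->dev_private before
drm_gem_object_unreference_unlocked() drops the reference, instead of reading
robj->rdev afterwards, when the object may already have been freed. A toy refcount
example of that copy-before-put rule; it is not DRM code, just the general shape of
the fix.

/* Copy what you still need out of an object before dropping your reference. */
#include <stdio.h>
#include <stdlib.h>

struct dev { const char *name; };

struct obj {
        int refcount;
        struct dev *dev;
};

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                printf("freeing object\n");
                free(o);
        }
}

static void use_object(struct obj *o)
{
        struct dev *dev = o->dev;       /* grab this before the put */

        obj_put(o);                     /* o may be gone from here on */
        printf("still have dev %s\n", dev->name);
}

int main(void)
{
        static struct dev d = { "card0" };
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;
        o->dev = &d;
        use_object(o);
        return 0;
}
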
 
index c7b61f16ecfd79855315b5ad938499510a2d8b41..0b0279291a73e79109dd95db5ceee4823be1da66 100644 (file)
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15DC, 0);
 
        /* empty context1-15 */
-       /* FIXME start with 1G, once using 2 level pt switch to full
+       /* FIXME start with 4G, once using 2 level pt switch to full
         * vm size space
         */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
index fa10f847f7dbf163ba5856c296690039ba07b242..585344b6d33815632b8d477ebb1f0eb9e7f80818 100644 (file)
@@ -517,6 +517,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index 6ac0286b53757cd80e0f8d713b7af7f332497ae3..4c87276c8ddba2f2a6f072cb536e84f8b7a6d256 100644 (file)
@@ -1503,6 +1503,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1995,6 +1998,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2089,6 +2093,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index d1cdd2d284095ea7d140f93822ab3de7a0694b98..875ff451842b96f05cdeda4d6899b9d1ee18179c 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS    0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
 #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL        0x0007
 
+#define USB_VENDOR_ID_MADCATZ          0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
+
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
index e7701d99f8e8598207a18de6b6ad7f48a0796073..f1de3979181fd22517c1f068a57d4526cb0ce6e8 100644 (file)
@@ -2341,7 +2341,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
 
        /* Start monitoring */
        it87_write_value(data, IT87_REG_CONFIG,
-                        (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+                        (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
                         | (update_vbat ? 0x41 : 0x01));
 }
 
index 61c9cf15fa52ecd50cb1a1b5421c8fef3666f24c..1201a15784c3a0eec329affa6f6edf1546df0b51 100644 (file)
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;
 
-               ret = hwspin_lock_register_single(hwlock, i);
+               ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
 
 reg_failed:
        while (--i >= 0)
-               hwspin_lock_unregister_single(i);
+               hwspin_lock_unregister_single(base_id + i);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
index 57d19d4e0a2d94a22f68d13c6f0ee44a9ab8807b..c96653b58867deb406a13913774a7e35c074dec7 100644 (file)
@@ -282,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->button_irq,
                                     NULL, as5011_button_interrupt,
-                                    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_RISING |
+                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     "as5011_button", as5011);
        if (error < 0) {
                dev_err(&client->dev,
@@ -296,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->axis_irq, NULL,
                                     as5011_axis_interrupt,
-                                    plat_data->axis_irqflags,
+                                    plat_data->axis_irqflags | IRQF_ONESHOT,
                                     "as5011_joystick", as5011);
        if (error) {
                dev_err(&client->dev,
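
This hunk, and the keypad and touchscreen hunks that follow, add IRQF_ONESHOT
wherever request_threaded_irq() is called with a NULL primary handler: the IRQ core
keeps the line masked until the thread function finishes and rejects such requests
when the flag is missing. A kernel-style fragment of the call pattern (the "foo"
names are hypothetical and do not come from any of these drivers; it is a sketch,
not a buildable module):

#include <linux/interrupt.h>

struct foo_chip {
        void *priv;
};

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
        struct foo_chip *chip = dev_id;

        /* runs in process context and may sleep, e.g. for I2C transfers */
        (void)chip;
        return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_chip *chip, unsigned int irq)
{
        /*
         * No primary handler, so IRQF_ONESHOT is required: the line stays
         * masked until foo_irq_thread() returns, and the core refuses the
         * request otherwise.
         */
        return request_threaded_irq(irq, NULL, foo_irq_thread,
                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                    "foo", chip);
}

For interrupts whose trigger flags come from platform data, as with the as5011 axis
IRQ above, the fix simply ORs IRQF_ONESHOT into those flags.
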
index ee16fb67b7ae235bd9469bc50b9b0f588b99a2e4..83811e45d6339b013cc4f9cc16b90c5c66ba8af8 100644 (file)
@@ -142,6 +142,7 @@ static const struct xpad_device {
        { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
        { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
        { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+       { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
        { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
        { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
        { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+       { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
        { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
 };
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
        XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
+       { USB_DEVICE(0x0738, 0x4540) },         /* Mad Catz Beat Pad */
        XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x146b),            /* BigBen Interactive Controllers */
        XPAD_XBOX360_VENDOR(0x1bad),            /* Harmonix Rock Band Guitar and Drums */
-       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
        { }
 };
 
index 64a0ca4c92f3376562fe07c8f4855ce889417b1d..0d77f6c84950f7d921e03ae0e078a3c788b72bb6 100644 (file)
@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
        }
 
        error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
-                       IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_mem;
index caa218a51b5ac94bd89cade097a41cd926251dcd..7613f1cac9517c1ecf30e18f2946c872c3e25252 100644 (file)
@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,
 
        error = request_threaded_irq(client->irq, NULL,
                                     mpr_touchkey_interrupt,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->dev.driver->name, mpr121);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
index 0b7b2f891752061a50ab92745e67e8025c146523..ca68f2992d7292ebdebafc0d6947e201448fc623 100644 (file)
@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
        msleep(QT1070_RESET_TIME);
 
        err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
-               IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+                                  IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+                                  client->dev.driver->name, data);
        if (err) {
                dev_err(&client->dev, "fail to request irq\n");
                goto err_free_mem;
index 3afea3f897182adba5c1439dc38e7a34126fa440..c355cdde8d223e0f7184c297df95d41f3048fb43 100644 (file)
@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
 
                error = request_threaded_irq(chip->irqnum, NULL,
                                             tca6416_keys_isr,
-                                            IRQF_TRIGGER_FALLING,
+                                            IRQF_TRIGGER_FALLING |
+                                               IRQF_ONESHOT,
                                             "tca6416-keypad", chip);
                if (error) {
                        dev_dbg(&client->dev,
index 5f87b28b31920b92caf1644413c8cab7fe4da66a..893869b29ed9895c0f1998fbb2068ce7a41ef402 100644 (file)
@@ -360,7 +360,7 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
                client->irq = gpio_to_irq(client->irq);
 
        error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, keypad_data);
        if (error) {
                dev_dbg(&client->dev,
index a4a445fb7020bcf1df98d1a36e958c7e70d83b39..4c34f21fbe2dff4a8b823e48dd96fb4635a81d16 100644 (file)
@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad press key irq\n");
                goto error_irq_press;
        }
 
-       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad release key irq\n");
                goto error_irq_release;
index 0ac75bbad4d69d61e64719d1a2550778f121f599..2e5d5e1de64787f17c2349e1a3c575144411518f 100644 (file)
@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
        struct ad714x_platform_data *plat_data = dev->platform_data;
        struct ad714x_chip *ad714x;
        void *drv_mem;
+       unsigned long irqflags;
 
        struct ad714x_button_drv *bt_drv;
        struct ad714x_slider_drv *sd_drv;
@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
                alloc_idx++;
        }
 
+       irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
+       irqflags |= IRQF_ONESHOT;
+
        error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
-                               plat_data->irqflags ?
-                                       plat_data->irqflags : IRQF_TRIGGER_FALLING,
-                               "ad714x_captouch", ad714x);
+                                    irqflags, "ad714x_captouch", ad714x);
        if (error) {
                dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
                goto err_unreg_dev;
index 35083c6836c351ba5110f6fe3ec0cecc98b074e9..c1313d8535c349993f0bb64ba3940ffe3e32b1d2 100644 (file)
@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
        /* REVISIT:  flush the event queue? */
 
        status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
-                       IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
+                                     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                     dev_name(&pdev->dev), keys);
        if (status < 0)
                goto fail2;
 
index 2cf681d98c0d2d6433a6f8d11c502935d40c202a..d528c23e194f6efcbe7efbe8651d218aa6ef7965 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI  0x0252
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO   0x0253
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS   0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 
 #define BCM5974_DEVICE(prod) {                                 \
        .match_flags = (USB_DEVICE_ID_MATCH_DEVICE |            \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+       /* MacbookPro10,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
        /* Terminating entry */
        {}
 };
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
                { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0x84, sizeof(struct bt_data),
+               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+               { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+               { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+               { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+       },
        {}
 };
 
index cad5602d3ce45a525ca70d9cc1cf7f766348c24e..8b31473a81fe9bd6056dd95363405f0e7610c4ee 100644 (file)
@@ -216,7 +216,7 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,
 
                rep_data[0] = 12;
                result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
-                                         rep_data[0], &rep_data, 2,
+                                         rep_data[0], rep_data, 2,
                                          WAC_MSG_RETRIES);
 
                if (result >= 0 && rep_data[1] > 2)
@@ -401,7 +401,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                break;
 
                        case HID_USAGE_CONTACTMAX:
-                               wacom_retrieve_report_data(intf, features);
+                               /* leave touch_max as is if predefined */
+                               if (!features->touch_max)
+                                       wacom_retrieve_report_data(intf, features);
                                i++;
                                break;
                        }
index e2482b40da5198fdb1406e135fff827d049e5895..bd4eb42776973b211aee79e40f51b47e2f5e8d27 100644 (file)
@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
                        AD7879_TMR(ts->pen_down_acc_interval);
 
        err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
-                                  IRQF_TRIGGER_FALLING,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                   dev_name(dev), ts);
        if (err) {
                dev_err(dev, "irq %d busy?\n", ts->irq);
index 42e645062c208e842b6feb5ecaf6824004c49a0c..25fd0561a17d2f1afe83a84c28864e81510acbba 100644 (file)
@@ -1149,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
                goto err_free_object;
 
        error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
-                       pdata->irqflags, client->dev.driver->name, data);
+                                    pdata->irqflags | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_object;
index f2d03c06c2da6660c30f64c177164dafb432385f..5c487d23f11cbaedd29e65c5143ba611a90e7d78 100644 (file)
@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
        input_set_drvdata(in_dev, bu21013_data);
 
        error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
-                                    IRQF_TRIGGER_FALLING | IRQF_SHARED,
+                                    IRQF_TRIGGER_FALLING | IRQF_SHARED |
+                                       IRQF_ONESHOT,
                                     DRIVER_TP, bu21013_data);
        if (error) {
                dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
index 237753ad10318b9fab24b6d559448ea41d492480..464f1bf4b61dcbf62af90196ba28608b6896e6f8 100644 (file)
@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
        }
 
        err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
-                                  IRQF_TRIGGER_RISING, "touch_reset_key", ts);
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  "touch_reset_key", ts);
        if (err < 0) {
                dev_err(&client->dev,
                        "irq %d busy? error %d\n", client->irq, err);
index 3cd7a837f82b203f41f5ff1aa7cefcf5b3714594..cf299377fc4980e54c02a0bfeb25c41ae2fbc5ec 100644 (file)
@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
                             MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
 
        err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
-                                  0, "mrstouch", tsdev);
+                                  IRQF_ONESHOT, "mrstouch", tsdev);
        if (err) {
                dev_err(tsdev->dev, "unable to allocate irq\n");
                goto err_free_mem;
index 72f6ba3a470937d2528207d20c53d182a0b7803f..953b4c105cad75ead98062869d7a6c0cc5237068 100644 (file)
@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
        input_set_drvdata(input, tsdata);
 
        error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, tsdata);
        if (error) {
                dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
index 7e74880973591bb61daa6e3317a94e43bb8f5415..368d2c6cf780cb0e9a1ffad750bace5a5d6b6e6c 100644 (file)
@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
                                     dev_name(dev), ts);
        if (error < 0) {
                dev_err(ts->dev, "Could not allocate ts irq\n");
index b6adeaee9cc5f0f226781f59ba79961fd9eb1e0b..5ce3fa8ce6465e049dd415b784eaafe7e0fbf7ed 100644 (file)
@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
        tsc2005_stop_scan(ts);
 
        error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
-                                    IRQF_TRIGGER_RISING, "tsc2005", ts);
+                                    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                    "tsc2005", ts);
        if (error) {
                dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
                goto err_free_mem;
index a2e418cba0ff9df7ebb0c4b5b44361a8987b829c..625626391f2d39d3802710a1677ac49ef3181c9a 100644 (file)
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
 
+static struct dma_map_ops amd_iommu_dma_ops;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -402,7 +404,7 @@ static void amd_iommu_stats_init(void)
                return;
 
        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
-                                        (u32 *)&amd_iommu_unmap_flush);
+                                        &amd_iommu_unmap_flush);
 
        amd_iommu_stats_add(&compl_wait);
        amd_iommu_stats_add(&cnt_map_single);
@@ -2267,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
                list_add_tail(&dma_domain->list, &iommu_pd_list);
                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 
+               dev_data = get_dev_data(dev);
+
+               if (!dev_data->passthrough)
+                       dev->archdata.dma_ops = &amd_iommu_dma_ops;
+               else
+                       dev->archdata.dma_ops = &nommu_dma_ops;
+
                break;
        case BUS_NOTIFY_DEL_DEVICE:
 
index 542024ba6dba2eaccd640dbece64a44222285769..a33612f3206f25f1146df2c84b254b372b306382 100644 (file)
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf;                     /* largest PCI device id we have
                                           to handle */
 LIST_HEAD(amd_iommu_unity_map);                /* a list of required unity mappings
                                           we find in ACPI */
-bool amd_iommu_unmap_flush;            /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush;             /* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);             /* list of all AMD IOMMUs in the
                                           system */
@@ -1641,6 +1641,8 @@ static int __init amd_iommu_init(void)
 
        amd_iommu_init_api();
 
+       x86_platform.iommu_shutdown = disable_iommus;
+
        if (iommu_pass_through)
                goto out;
 
@@ -1649,8 +1651,6 @@ static int __init amd_iommu_init(void)
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
-       x86_platform.iommu_shutdown = disable_iommus;
-
 out:
        return ret;
 
index 24355559a2ad23cc98e27d1074bfc5ccfb826a73..c1b1d489817e2b667edbbeb95d36eae1d503a994 100644 (file)
@@ -652,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
  */
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
 
 /* Smallest number of PASIDs supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasids;
index ecd679043d7740e6883aae9cbee68da6321fedc1..3f3d09d560ea3c6ce5e3bc7d019e377ac9e63dfe 100644 (file)
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
                return 0;
 
        as->pte_count = devm_kzalloc(smmu->dev,
-                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
        if (!as->pte_count) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device PTE counters\n");
                return -ENOMEM;
        }
-       as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+       as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
        if (!as->pdir_page) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device page directory\n");
index 41dc76db43118a347e4a8a06923b0fd076a12dc3..a019fbb70880bd402aea095b8b90a8ca26a0e66a 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/reboot.h>
 #include "leds.h"
 
+static int panic_heartbeats;
+
 struct heartbeat_trig_data {
        unsigned int phase;
        unsigned int period;
@@ -34,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
        unsigned long brightness = LED_OFF;
        unsigned long delay = 0;
 
+       if (unlikely(panic_heartbeats)) {
+               led_set_brightness(led_cdev, LED_OFF);
+               return;
+       }
+
        /* acts like an actual heart beat -- ie thump-thump-pause... */
        switch (heartbeat_data->phase) {
        case 0:
@@ -111,12 +118,19 @@ static int heartbeat_reboot_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+                                    unsigned long code, void *unused)
+{
+       panic_heartbeats = 1;
+       return NOTIFY_DONE;
+}
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
 
 static struct notifier_block heartbeat_panic_nb = {
-       .notifier_call = heartbeat_reboot_notifier,
+       .notifier_call = heartbeat_panic_notifier,
 };
 
 static int __init heartbeat_trig_init(void)
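
The trigger previously pointed its panic notifier_block at the reboot handler; the
fix adds a dedicated panic callback that only sets a flag, which
led_heartbeat_function() checks so the LED is turned off without doing any teardown
in panic context. A generic kernel-style sketch of that flag-only panic-notifier
pattern, assuming the standard notifier API rather than copying the driver's init
code:

#include <linux/kernel.h>
#include <linux/notifier.h>

static int in_panic;

static int my_panic_notifier(struct notifier_block *nb,
                             unsigned long code, void *unused)
{
        /* no locking, no allocation, no sleeping in panic context */
        in_panic = 1;
        return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
        .notifier_call = my_panic_notifier,
};

static void my_panic_hook_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
}

Periodic work (a timer callback here) then tests in_panic and bails out early,
exactly as the new led_heartbeat_function() check does above.
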
index 37fdaf81bd1f89abfd28f6a46d2be7ce95f8bcba..ce59824fb41401963113a17af05ac660954bbef2 100644 (file)
@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
        if (r)
                return r;
 
+       r = dm_pool_commit_metadata(pool->pmd);
+       if (r) {
+               DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+                     __func__, r);
+               return r;
+       }
+
        r = dm_pool_reserve_metadata_snap(pool->pmd);
        if (r)
                DMWARN("reserve_metadata_snap message failed.");
index 1c2f9048e1ae010864c4d2478c05f9a510c729e1..a4c219e3c859624925125e54029aac2da6211674 100644 (file)
@@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
                if ((info->state & (1<<MD_DISK_SYNC)) &&
-                   (!test_bit(In_sync, &rdev->flags) ||
-                    rdev->raid_disk != info->raid_disk)) {
+                    rdev->raid_disk != info->raid_disk) {
                        /* This was a hot-add request, but events doesn't
                         * match, so reject it.
                         */
@@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
        thread->tsk = kthread_run(md_thread, thread,
                                  "%s_%s",
                                  mdname(thread->mddev),
-                                 name ?: mddev->pers->name);
+                                 name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
        int skipped = 0;
        struct md_rdev *rdev;
        char *desc;
+       struct blk_plug plug;
 
        /* just in case thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
        }
        mddev->curr_resync_completed = j;
 
+       blk_start_plug(&plug);
        while (j < max_sectors) {
                sector_t sectors;
 
@@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
+       blk_finish_plug(&plug);
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
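
The new blk_start_plug()/blk_finish_plug() pair brackets the resync loop so that the bios issued inside it are batched by the block layer rather than dispatched one at a time. A minimal sketch of the pattern; submit_one_bio() is a stand-in for the per-iteration work, not an md function:

#include <linux/blkdev.h>

static void submit_one_bio(int chunk)
{
	/* placeholder for issuing one resync bio */
}

static void plugged_submit_example(void)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < 16; i++)
		submit_one_bio(i);
	blk_finish_plug(&plug);		/* flush whatever was batched */
}
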
index 9339e67fcc79a9f961d016d3f80291ce012323bb..61a1833ebaf33ec40afaf752db9a849439c9bd13 100644 (file)
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
        }
 
        {
-               mddev->thread = md_register_thread(multipathd, mddev, NULL);
+               mddev->thread = md_register_thread(multipathd, mddev,
+                                                  "multipath");
                if (!mddev->thread) {
                        printk(KERN_ERR "multipath: couldn't allocate thread"
                                " for %s\n", mdname(mddev));
index 50ed53bf4aa2b1efe1136c2520067ece16bb9b35..fc90c11620adb026c9842d6e95497248d02556e8 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
-       ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-       if (!ca->counts)
-               return -ENOMEM;
+
+       if (!nr_blocks)
+               ca->counts = NULL;
+       else {
+               ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+               if (!ca->counts)
+                       return -ENOMEM;
+       }
 
        return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+       vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
        int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
        dm_block_t nr_blocks = ca->nr + extra_blocks;
-       uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+       uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
        if (!counts)
                return -ENOMEM;
 
-       memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-       kfree(ca->counts);
+       if (ca->counts) {
+               memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+               ca_destroy(ca);
+       }
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
        return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-       kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
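
The kzalloc() to vzalloc() conversion earlier in this file matters because the counter array scales with the number of metadata blocks: kzalloc() needs physically contiguous pages and starts failing for large devices, while vzalloc() only needs virtually contiguous memory. A minimal sketch of the pairing, with vfree() as the matching release:

#include <linux/types.h>
#include <linux/vmalloc.h>

static u32 *alloc_counts(unsigned long nr_blocks)
{
	if (!nr_blocks)
		return NULL;	/* mirrors the nr_blocks == 0 case above */
	return vzalloc(sizeof(u32) * nr_blocks);
}

static void free_counts(u32 *counts)
{
	vfree(counts);		/* vfree(NULL) is a no-op */
}
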
index fc469ba9f6277a7c701615a1b3d788a05fa52559..3d0ed53328831627ab6ec79cb85946f36b0fe7a4 100644 (file)
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
                                       dm_block_t nr_blocks)
 {
        struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-       return dm_sm_checker_create_fresh(sm);
+       struct dm_space_map *smc;
+
+       if (IS_ERR_OR_NULL(sm))
+               return sm;
+
+       smc = dm_sm_checker_create_fresh(sm);
+       if (IS_ERR(smc))
+               dm_sm_destroy(sm);
+
+       return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 
index 400fe144c0cd1c94dd5c325c5ed690a425b7ece3..e5604b32d91ff0017a5a625cfd8106fca92bfe19 100644 (file)
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+       if (!tm->is_clone)
+               wipe_shadow_table(tm);
+
        kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
 
        } else {
                r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
        }
 
        return 0;
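
These hunks move dm_sm_checker_create() and its callers from the NULL-on-failure convention to error pointers, so a specific errno can be propagated to the caller. A minimal, self-contained sketch of that convention; struct thing and make_thing() are illustrative only:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct thing { int v; };
static struct thing some_thing;

static struct thing *make_thing(bool want_fail)
{
	if (want_fail)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return &some_thing;
}

static int use_thing(void)
{
	struct thing *t = make_thing(false);

	if (IS_ERR(t))
		return PTR_ERR(t);		/* recover the errno */
	return 0;
}
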
index a9c7981ddd245c6955cbed390b617a9172abde31..240ff3125040ab51c1a010966e6e231fafd13062 100644 (file)
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                int bad_sectors;
 
                int disk = start_disk + i;
-               if (disk >= conf->raid_disks)
-                       disk -= conf->raid_disks;
+               if (disk >= conf->raid_disks * 2)
+                       disk -= conf->raid_disks * 2;
 
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        struct md_rdev *blocked_rdev;
-       int plugged;
        int first_clone;
        int sectors_handled;
        int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
         * the bad blocks.  Each set of writes gets its own r1bio
         * with a set of bios attached.
         */
-       plugged = mddev_check_plugged(mddev);
 
        disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2488,9 +2485,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                atomic_set(&r1_bio->remaining, read_targets);
-               for (i = 0; i < conf->raid_disks * 2; i++) {
+               for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
                        bio = r1_bio->bios[i];
                        if (bio->bi_end_io == end_sync_read) {
+                               read_targets--;
                                md_sync_acct(bio->bi_bdev, nr_sectors);
                                generic_make_request(bio);
                        }
@@ -2621,7 +2619,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                goto abort;
        }
        err = -ENOMEM;
-       conf->thread = md_register_thread(raid1d, mddev, NULL);
+       conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid1:%s: couldn't allocate thread\n",
index 99ae6068e456992ef2b16c98e01d5c2ee0362128..8da6282254c3e822a27702c469b9afce720bda43 100644 (file)
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
-       int plugged;
        int sectors_handled;
        int max_sectors;
        int sectors;
@@ -1239,7 +1238,6 @@ read_again:
         * of r10_bios is recorded in bio->bi_phys_segments just as with
         * the read case.
         */
-       plugged = mddev_check_plugged(mddev);
 
        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
 
                if (!r10_bio->devs[i].repl_bio)
                        continue;
@@ -1423,6 +1423,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !mddev->bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage, WRITE)
+                                            s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage,
+                                            s, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
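
The two hunks above drop the "<<9" because r10_sync_page_io() evidently takes a sector count, and the shift was turning it into a byte count. For reference, block-layer sectors are 512 bytes, so the sector-to-byte conversion is a shift by 9:

#define EXAMPLE_SECTOR_SHIFT 9	/* 512-byte sectors */

static inline unsigned long sectors_to_bytes(unsigned long sectors)
{
	return sectors << EXAMPLE_SECTOR_SHIFT;
}
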
@@ -2512,7 +2511,7 @@ read_more:
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
-               "md/raid10:%s: %s: redirecting"
+               "md/raid10:%s: %s: redirecting "
                "sector %llu to another mirror\n",
                mdname(mddev),
                bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
        blk_start_plug(&plug);
        for (;;) {
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
+                       if (sect >= mddev->resync_max_sectors) {
+                               /* last stripe is not complete - don't
+                                * try to recover this sector.
+                                */
+                               continue;
+                       }
                        /* Unless we are doing a full sync, or a replacement
                         * we only need to recover the block if it is set in
                         * the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        spin_lock_init(&conf->resync_lock);
        init_waitqueue_head(&conf->wait_barrier);
 
-       conf->thread = md_register_thread(raid10d, mddev, NULL);
+       conf->thread = md_register_thread(raid10d, mddev, "raid10");
        if (!conf->thread)
                goto out;
 
index d26767246d26ad1d2bb9a2da371ab1bbbdf130fc..04348d76bb30fa8831964ea980ec2df912a45f92 100644 (file)
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
+               int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (conf->mddev->degraded >= conf->max_degraded)
+               else if (conf->mddev->degraded >= conf->max_degraded) {
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+               } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (atomic_read(&rdev->read_errors)
+               else if (atomic_read(&rdev->read_errors)
                         > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
-                       md_error(conf->mddev, rdev);
+                       if (!(set_bad
+                             && test_bit(In_sync, &rdev->flags)
+                             && rdev_set_badblocks(
+                                     rdev, sh->sector, STRIPE_SECTORS, 0)))
+                               md_error(conf->mddev, rdev);
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (conf->mddev->external && unlikely(s.blocked_rdev))
-               md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+       if (unlikely(s.blocked_rdev)) {
+               if (conf->mddev->external)
+                       md_wait_for_blocked_rdev(s.blocked_rdev,
+                                                conf->mddev);
+               else
+                       /* Internal metadata will immediately
+                        * be written by raid5d, so we don't
+                        * need to wait here.
+                        */
+                       rdev_dec_pending(s.blocked_rdev,
+                                        conf->mddev);
+       }
 
        if (s.handle_bad_blocks)
                for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
-       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
-       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
+                       mddev_check_plugged(mddev);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        break;
                }
-                       
        }
-       if (!plugged)
-               md_wakeup_thread(mddev->thread);
 
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        int raid_disk, memory, max_disks;
        struct md_rdev *rdev;
        struct disk_info *disk;
+       char pers_name[6];
 
        if (mddev->new_level != 5
            && mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
                       mdname(mddev), memory);
 
-       conf->thread = md_register_thread(raid5d, mddev, NULL);
+       sprintf(pers_name, "raid%d", mddev->new_level);
+       conf->thread = md_register_thread(raid5d, mddev, pers_name);
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
-               disk = rdev->saved_raid_disk;
-       else
-               disk = first;
-       for ( ; disk <= last ; disk++) {
+               first = rdev->saved_raid_disk;
+
+       for (disk = first; disk <= last; disk++) {
                p = conf->disks + disk;
                if (p->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
-                       break;
+                       goto out;
                }
+       }
+       for (disk = first; disk <= last; disk++) {
+               p = conf->disks + disk;
                if (test_bit(WantReplacement, &p->rdev->flags) &&
                    p->replacement == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+out:
        print_raid5_conf(conf);
        return err;
 }
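
A small aside on the new pers_name buffer in setup_conf(): six bytes is exactly enough for "raid4" through "raid6" plus the terminating NUL, since new_level is limited to 4, 5 or 6 earlier in the function. A defensive variant would make that bound explicit, e.g.:

	char pers_name[6];

	/* equivalent to the sprintf() above, with an explicit bound */
	snprintf(pers_name, sizeof(pers_name), "raid%d", mddev->new_level);
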
index 00a67326c1931dd01496a1fdb8544ae0ceb77206..39eab73b01ae9bb5e0a9c4d9667e3c99b4b51415 100644 (file)
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        if (minor == MAX_DVB_MINORS) {
                kfree(dvbdevfops);
                kfree(dvbdev);
+               up_write(&minor_rwsem);
                mutex_unlock(&dvbdev_register_lock);
                return -EINVAL;
        }
index 342c2c8c1ddfcff71f493f3f6f08a4f1969df440..54ee34872d143cea7345f60a5ad9038e91266a51 100644 (file)
@@ -232,7 +232,7 @@ MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
 
 static bool txandrx; /* default = 0 */
 module_param(txandrx, bool, 0444);
-MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
+MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
 
 static unsigned int wake_sc = 0x800F040C;
 module_param(wake_sc, uint, 0644);
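
The MODULE_PARM_DESC() fix above matters because the macro's first argument has to name the same variable as module_param(); with the old text the description was attached to "invert" and "txandrx" had none in modinfo. The corrected pairing, in isolation:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool txandrx;	/* default = 0 */
module_param(txandrx, bool, 0444);
MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
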
@@ -1032,6 +1032,8 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
        data->dev->tx_ir = wbcir_tx;
        data->dev->priv = data;
        data->dev->dev.parent = &device->dev;
+       data->dev->timeout = MS_TO_NS(100);
+       data->dev->allowed_protos = RC_TYPE_ALL;
 
        if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
                dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
index 068f78dc5d13fa0268eef854b4956d18f7521bba..b4c99c7270cf8fc8d28d26226248208f8b930dad 100644 (file)
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvisocpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = URB_ISO_ASAP;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->interval = 1;
                urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvbulkpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->complete = cx231xx_audio_bulkirq;
                urb->transfer_buffer_length = sb_size;
index 3d15314e1f88d1ff71924a7a65362dd13b75fd12..ac7db52f404ffbc9c95207f7a2bd5b1414aa4187 100644 (file)
@@ -448,7 +448,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
                        return -ENOMEM;
                }
                dev->vbi_mode.bulk_ctl.urb[i] = urb;
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
 
                dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
                    kzalloc(sb_size, GFP_KERNEL);
index 13739e002a63fb6460bec16572bad10581fcb998..080e11157e5fe89afdb384c702376d80528daf5d 100644 (file)
@@ -127,22 +127,37 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1250] = {
                .name           = "Hauppauge WinTV-HVR1250",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
+               .tuner_type     = TUNER_PHILIPS_TDA8290,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .tuner_bus      = 1,
+#endif
+               .force_bff      = 1,
                .input          = {{
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
                        .type   = CX23885_VMUX_TELEVISION,
-                       .vmux   = 0,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1,
+                       .amux   = CX25840_AUDIO8,
                        .gpio0  = 0xff00,
                }, {
-                       .type   = CX23885_VMUX_DEBUG,
-                       .vmux   = 0,
-                       .gpio0  = 0xff01,
-               }, {
+#endif
                        .type   = CX23885_VMUX_COMPOSITE1,
-                       .vmux   = 1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                }, {
                        .type   = CX23885_VMUX_SVIDEO,
-                       .vmux   = 2,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                } },
        },
@@ -267,7 +282,55 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1255] = {
                .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
+               .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_COMPOSITE1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
+       },
+       [CX23885_BOARD_HAUPPAUGE_HVR1255_22111] = {
+               .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1210] = {
                .name           = "Hauppauge WinTV-HVR1210",
@@ -624,7 +687,7 @@ struct cx23885_subid cx23885_subids[] = {
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2259,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255_22111,
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2291,
@@ -900,7 +963,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
        struct cx23885_dev *dev = port->dev;
        u32 bitmask = 0;
 
-       if (command == XC2028_RESET_CLK)
+       if ((command == XC2028_RESET_CLK) || (command == XC2028_I2C_FLUSH))
                return 0;
 
        if (command != 0) {
@@ -1130,6 +1193,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
                /* GPIO-6 I2C Gate which can isolate the demod from the bus */
@@ -1267,6 +1331,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1400:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* FIXME: Implement me */
                break;
@@ -1424,6 +1489,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1511,6 +1577,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1526,10 +1593,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
         */
        switch (dev->board) {
        case CX23885_BOARD_TEVII_S470:
-       case CX23885_BOARD_HAUPPAUGE_HVR1250:
                /* Currently only enabled for the integrated IR controller */
                if (!enable_885_ir)
                        break;
+       case CX23885_BOARD_HAUPPAUGE_HVR1250:
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
        case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
        case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,6 +1606,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
        case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_MYGICA_X8506:
index a80a92c474558a4c555a473c89aa1df568c88288..cd542684ba022c4f194928e37cfc318d6a1b0b79 100644 (file)
@@ -712,6 +712,7 @@ static int dvb_register(struct cx23885_tsport *port)
                }
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
                i2c_bus = &dev->i2c_bus[0];
                fe0->dvb.frontend = dvb_attach(s5h1411_attach,
                                               &hcw_s5h1411_config,
@@ -721,6 +722,11 @@ static int dvb_register(struct cx23885_tsport *port)
                                   0x60, &dev->i2c_bus[1].i2c_adap,
                                   &hauppauge_tda18271_config);
                }
+
+               tda18271_attach(&dev->ts1.analog_fe,
+                       0x60, &dev->i2c_bus[1].i2c_adap,
+                       &hauppauge_tda18271_config);
+
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
                i2c_bus = &dev->i2c_bus[0];
index c654bdc7ccb201dd4e285c0b7ddfe703ab89cb61..22f8e7fbd6656fe81f33f824832a38cc7e2dd14c 100644 (file)
@@ -505,6 +505,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
 
        if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
                (dev->board == CX23885_BOARD_MPX885) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
                (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
                /* Configure audio routing */
                v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
@@ -1578,7 +1581,9 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
 
        fe = vfe->dvb.frontend;
 
-       if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+       if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111))
                fe = &dev->ts1.analog_fe;
 
        if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1608,6 +1613,8 @@ int cx23885_set_frequency(struct file *file, void *priv,
        int ret;
 
        switch (dev->board) {
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
                ret = cx23885_set_freq_via_ops(dev, f);
                break;
index d884784a1c8582f8a97c379dacc5ef603d2f934c..13c37ec07ae7e250b54694aad7c821c670fd584f 100644 (file)
@@ -90,6 +90,7 @@
 #define CX23885_BOARD_MYGICA_X8507             33
 #define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
 #define CX23885_BOARD_TEVII_S471               35
+#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111  36
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
index fc1ff69cffd0d4917e86292573691be2436282fc..d8eac3e30a7ea99217e5b149de79752e73f594cf 100644 (file)
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
 
 
 /* ----------------------------------------------------------------------- */
-static void cx23885_std_setup(struct i2c_client *client);
+static void cx23888_std_setup(struct i2c_client *client);
 
 int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
 {
@@ -638,10 +638,13 @@ static void cx23885_initialize(struct i2c_client *client)
        finish_wait(&state->fw_wait, &wait);
        destroy_workqueue(q);
 
-       /* Call the cx23885 specific std setup func, we no longer rely on
+       /* Call the cx23888 specific std setup func, we no longer rely on
         * the generic cx24840 func.
         */
-       cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
+       else
+               cx25840_std_setup(client);
 
        /* (re)set input */
        set_input(client, state->vid_input, state->aud_input);
@@ -1103,9 +1106,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
 
                        cx25840_write4(client, 0x410, 0xffff0dbf);
                        cx25840_write4(client, 0x414, 0x00137d03);
-                       cx25840_write4(client, 0x418, 0x01008080);
+
+                       /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is 
+                          CHROMA_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x418, 0x01008080);
+                       else
+                               cx25840_write4(client, 0x418, 0x01000000);
+
                        cx25840_write4(client, 0x41c, 0x00000000);
-                       cx25840_write4(client, 0x420, 0x001c3e0f);
+
+                       /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is 
+                          CRUSH_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x420, 0x001c3e0f);
+                       else
+                               cx25840_write4(client, 0x420, 0x001c8282);
+
                        cx25840_write4(client, 0x42c, 0x42600000);
                        cx25840_write4(client, 0x430, 0x0000039b);
                        cx25840_write4(client, 0x438, 0x00000000);
@@ -1233,7 +1250,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
                cx25840_write4(client, 0x8d0, 0x1f063870);
        }
 
-       if (is_cx2388x(state)) {
+       if (is_cx23888(state)) {
                /* HVR1850 */
                /* AUD_IO_CTRL - I2S Input, Parallel1*/
                /*  - Channel 1 src - Parallel1 (Merlin out) */
@@ -1298,8 +1315,8 @@ static int set_v4lstd(struct i2c_client *client)
        }
        cx25840_and_or(client, 0x400, ~0xf, fmt);
        cx25840_and_or(client, 0x403, ~0x3, pal_m);
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        else
                cx25840_std_setup(client);
        if (!is_cx2583x(state))
@@ -1312,6 +1329,7 @@ static int set_v4lstd(struct i2c_client *client)
 static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct v4l2_subdev *sd = to_sd(ctrl);
+       struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
        switch (ctrl->id) {
@@ -1324,12 +1342,20 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
                break;
 
        case V4L2_CID_SATURATION:
-               cx25840_write(client, 0x420, ctrl->val << 1);
-               cx25840_write(client, 0x421, ctrl->val << 1);
+               if (is_cx23888(state)) {
+                       cx25840_write(client, 0x418, ctrl->val << 1);
+                       cx25840_write(client, 0x419, ctrl->val << 1);
+               } else {
+                       cx25840_write(client, 0x420, ctrl->val << 1);
+                       cx25840_write(client, 0x421, ctrl->val << 1);
+               }
                break;
 
        case V4L2_CID_HUE:
-               cx25840_write(client, 0x422, ctrl->val);
+               if (is_cx23888(state))
+                       cx25840_write(client, 0x41a, ctrl->val);
+               else
+                       cx25840_write(client, 0x422, ctrl->val);
                break;
 
        default:
@@ -1354,11 +1380,21 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
        fmt->field = V4L2_FIELD_INTERLACED;
        fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
-       Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
-       Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+       } else {
+               Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       }
 
-       Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
-       Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+       } else {
+               Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       }
 
        Vlines = fmt->height + (is_50Hz ? 4 : 7);
 
@@ -1782,8 +1818,8 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
 
        return set_input(client, input, state->aud_input);
 }
@@ -1794,8 +1830,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        return set_input(client, state->vid_input, input);
 }
 
@@ -4939,7 +4975,7 @@ void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
        }
 }
 
-static void cx23885_std_setup(struct i2c_client *client)
+static void cx23888_std_setup(struct i2c_client *client)
 {
        struct cx25840_state *state = to_state(i2c_get_clientdata(client));
        v4l2_std_id std = state->std;
index 92da7c28b6f096f9f7c37a79aef39cdd126aae5a..862c6575c55791fa7a6f4488d7cd933226fd02cb 100644 (file)
@@ -2893,7 +2893,7 @@ static void request_module_async(struct work_struct *work)
 
        if (dev->board.has_dvb)
                request_module("em28xx-dvb");
-       if (dev->board.has_ir_i2c && !disable_ir)
+       if (dev->board.ir_codes && !disable_ir)
                request_module("em28xx-rc");
 }
 
index 6c31e46a1fd2ed4356b829abf916fee6e215b63e..b9c6f17eabb245fde118e3b198f163782d92a7bc 100644 (file)
@@ -2070,10 +2070,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
        set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
        set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
                        v4l2_ctrl_g_ctrl(sd->red));
-       set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
-       set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
-       set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
-                       v4l2_ctrl_g_ctrl(sd->vflip));
+       if (sd->gain)
+               set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+       if (sd->exposure)
+               set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+       if (sd->hflip)
+               set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+                               v4l2_ctrl_g_ctrl(sd->vflip));
 
        reg_w1(gspca_dev, 0x1007, 0x20);
        reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2176,7 +2179,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
        int avg_lum;
 
-       if (!v4l2_ctrl_g_ctrl(sd->autogain))
+       if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain))
                return;
 
        avg_lum = atomic_read(&sd->avg_lum);
index 41f9a254b24590a572223c35909255017e392612..637bde8aca28e25c2799cd85aded6eedafa3075c 100644 (file)
@@ -83,6 +83,7 @@
 #define CSICR1_INV_DATA                (1 << 3)
 #define CSICR1_INV_PCLK                (1 << 2)
 #define CSICR1_REDGE           (1 << 1)
+#define CSICR1_FMT_MASK                (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
 
 #define SHIFT_STATFF_LEVEL     22
 #define SHIFT_RXFF_LEVEL       19
@@ -230,6 +231,7 @@ struct mx2_prp_cfg {
        u32 src_pixel;
        u32 ch1_pixel;
        u32 irq_flags;
+       u32 csicr1;
 };
 
 /* prp resizing parameters */
@@ -330,6 +332,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .ch1_pixel      = 0x2ca00565, /* RGB565 */
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
                                                PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+                       .csicr1         = 0,
                }
        },
        {
@@ -343,6 +346,21 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
                                        PRP_INTR_CH2FC | PRP_INTR_LBOVF |
                                        PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_PACK_DIR,
+               }
+       },
+       {
+               .in_fmt         = V4L2_MBUS_FMT_UYVY8_2X8,
+               .out_fmt        = V4L2_PIX_FMT_YUV420,
+               .cfg            = {
+                       .channel        = 2,
+                       .in_fmt         = PRP_CNTL_DATA_IN_YUV422,
+                       .out_fmt        = PRP_CNTL_CH2_OUT_YUV420,
+                       .src_pixel      = 0x22000888, /* YUV422 (YUYV) */
+                       .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+                                       PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+                                       PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_SWAP16_EN,
                }
        },
 };
@@ -1015,14 +1033,14 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
                return ret;
        }
 
+       csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
+
        if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
                csicr1 |= CSICR1_REDGE;
        if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
                csicr1 |= CSICR1_SOF_POL;
        if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
                csicr1 |= CSICR1_HSYNC_POL;
-       if (pcdev->platform_flags & MX2_CAMERA_SWAP16)
-               csicr1 |= CSICR1_SWAP16_EN;
        if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
                csicr1 |= CSICR1_EXT_VSYNC;
        if (pcdev->platform_flags & MX2_CAMERA_CCIR)
@@ -1033,8 +1051,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
                csicr1 |= CSICR1_GCLK_MODE;
        if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
                csicr1 |= CSICR1_INV_DATA;
-       if (pcdev->platform_flags & MX2_CAMERA_PACK_DIR_MSB)
-               csicr1 |= CSICR1_PACK_DIR;
 
        pcdev->csicr1 = csicr1;
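
The key step in the csicr1 rework above is the read-modify-write added a couple of hunks earlier: clear the format-specific bits named by CSICR1_FMT_MASK, then OR in the value carried by the selected emma-prp table entry. In isolation the pattern looks like this (illustrative names, plain C):

#include <stdint.h>

static uint32_t apply_fmt_bits(uint32_t csicr1, uint32_t fmt_bits,
			       uint32_t fmt_mask)
{
	return (csicr1 & ~fmt_mask) | (fmt_bits & fmt_mask);
}
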
 
@@ -1109,7 +1125,8 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
                return 0;
        }
 
-       if (code == V4L2_MBUS_FMT_YUYV8_2X8) {
+       if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+           code == V4L2_MBUS_FMT_UYVY8_2X8) {
                formats++;
                if (xlate) {
                        /*
index 8a4935ecc655e9c114cd2f7defb51498f50d38c5..dd91da26f1b088f66bdb40b212fa9b1c84a46b5a 100644 (file)
@@ -888,12 +888,12 @@ static const struct preview_update update_attrs[] = {
                preview_config_contrast,
                NULL,
                offsetof(struct prev_params, contrast),
-               0, true,
+               0, 0, true,
        }, /* OMAP3ISP_PREV_BRIGHTNESS */ {
                preview_config_brightness,
                NULL,
                offsetof(struct prev_params, brightness),
-               0, true,
+               0, 0, true,
        },
 };
 
@@ -1102,7 +1102,7 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
        unsigned int elv = prev->crop.top + prev->crop.height - 1;
        u32 features;
 
-       if (format->code == V4L2_MBUS_FMT_Y10_1X10) {
+       if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
                sph -= 2;
                eph += 2;
                slv -= 2;
index c370c2d87c1798594c794d744c3dbf1bb08859cd..b4c679b3fb0f7a052550a7d5a71ea42367e49ceb 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
index 354574591908ee9cbbea18a5c518e04e2b9b4839..725812aa0c3044f5ffa31190a734e75ffcc9c01b 100644 (file)
@@ -350,7 +350,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
                if (pixm)
                        sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
                else
-                       sizes[i] = size;
+                       sizes[i] = max_t(u32, size, frame->payload[i]);
+
                allocators[i] = ctx->fimc_dev->alloc_ctx;
        }
 
@@ -479,37 +480,39 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
 static int fimc_capture_open(struct file *file)
 {
        struct fimc_dev *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
-
-       if (ret)
-               return ret;
+       int ret;
 
        dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
 
-       /* Return if the corresponding video mem2mem node is already opened. */
        if (fimc_m2m_active(fimc))
                return -EBUSY;
 
        set_bit(ST_CAPT_BUSY, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               return ret;
 
-       if (++fimc->vid_cap.refcnt == 1) {
-               ret = fimc_pipeline_initialize(&fimc->pipeline,
-                              &fimc->vid_cap.vfd->entity, true);
-               if (ret < 0) {
-                       dev_err(&fimc->pdev->dev,
-                               "Video pipeline initialization failed\n");
-                       pm_runtime_put_sync(&fimc->pdev->dev);
-                       fimc->vid_cap.refcnt--;
-                       v4l2_fh_release(file);
-                       clear_bit(ST_CAPT_BUSY, &fimc->state);
-                       return ret;
-               }
-               ret = fimc_capture_ctrls_create(fimc);
+       ret = v4l2_fh_open(file);
+       if (ret)
+               return ret;
 
-               if (!ret && !fimc->vid_cap.user_subdev_api)
-                       ret = fimc_capture_set_default_format(fimc);
+       if (++fimc->vid_cap.refcnt != 1)
+               return 0;
+
+       ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                      &fimc->vid_cap.vfd->entity, true);
+       if (ret < 0) {
+               clear_bit(ST_CAPT_BUSY, &fimc->state);
+               pm_runtime_put_sync(&fimc->pdev->dev);
+               fimc->vid_cap.refcnt--;
+               v4l2_fh_release(file);
+               return ret;
        }
+       ret = fimc_capture_ctrls_create(fimc);
+
+       if (!ret && !fimc->vid_cap.user_subdev_api)
+               ret = fimc_capture_set_default_format(fimc);
+
        return ret;
 }
 
@@ -818,9 +821,6 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_ctx *ctx = fimc->vid_cap.ctx;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        return fimc_fill_format(&ctx->d_frame, f);
 }
 
@@ -833,9 +833,6 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
        struct v4l2_mbus_framefmt mf;
        struct fimc_fmt *ffmt = NULL;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
                fimc_capture_try_format(ctx, &pix->width, &pix->height,
                                        NULL, &pix->pixelformat,
@@ -887,8 +884,6 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
        struct fimc_fmt *s_fmt = NULL;
        int ret, i;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
        if (vb2_is_busy(&fimc->vid_cap.vbq))
                return -EBUSY;
 
@@ -924,10 +919,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
                pix->width  = mf->width;
                pix->height = mf->height;
        }
+
        fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
        for (i = 0; i < ff->fmt->colplanes; i++)
-               ff->payload[i] =
-                       (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+               ff->payload[i] = pix->plane_fmt[i].sizeimage;
 
        set_frame_bounds(ff, pix->width, pix->height);
        /* Reset the composition rectangle if not yet configured */
@@ -1045,18 +1040,22 @@ static int fimc_cap_streamon(struct file *file, void *priv,
 {
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_pipeline *p = &fimc->pipeline;
+       struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
        int ret;
 
        if (fimc_capture_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&p->subdevs[IDX_SENSOR]->entity,
-                                   p->m_pipeline);
+       ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        if (fimc->vid_cap.user_subdev_api) {
                ret = fimc_pipeline_validate(fimc);
-               if (ret)
+               if (ret < 0) {
+                       media_entity_pipeline_stop(&sd->entity);
                        return ret;
+               }
        }
        return vb2_streamon(&fimc->vid_cap.vbq, type);
 }
index 92fc5a20fb768c5266b9e7d7506b20c9d0de511d..a4646ca1d56f31fb30eb35d7cd9829fa5aa9f46a 100644 (file)
@@ -153,7 +153,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+               .name           = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
                .fourcc         = V4L2_PIX_FMT_NV12M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -161,7 +161,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+               .name           = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
                .fourcc         = V4L2_PIX_FMT_YUV420M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 2, 2 },
@@ -169,7 +169,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 3,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+               .name           = "YUV 4:2:0 non-contig. 2p, tiled",
                .fourcc         = V4L2_PIX_FMT_NV12MT,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -641,7 +641,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
        if (!ctrls->ready)
                return;
 
-       mutex_lock(&ctrls->handler.lock);
+       mutex_lock(ctrls->handler.lock);
        v4l2_ctrl_activate(ctrls->rotate, active);
        v4l2_ctrl_activate(ctrls->hflip, active);
        v4l2_ctrl_activate(ctrls->vflip, active);
@@ -660,7 +660,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
                ctx->hflip    = 0;
                ctx->vflip    = 0;
        }
-       mutex_unlock(&ctrls->handler.lock);
+       mutex_unlock(ctrls->handler.lock);
 }
 
 /* Update maximum value of the alpha color control */
@@ -741,8 +741,8 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
        pix->width = width;
 
        for (i = 0; i < pix->num_planes; ++i) {
-               u32 bpl = pix->plane_fmt[i].bytesperline;
-               u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+               struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+               u32 bpl = plane_fmt->bytesperline;
 
                if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
                        bpl = pix->width; /* Planar */
@@ -754,8 +754,9 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
                if (i == 0) /* Same bytesperline for each plane. */
                        bytesperline = bpl;
 
-               pix->plane_fmt[i].bytesperline = bytesperline;
-               *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
+               plane_fmt->bytesperline = bytesperline;
+               plane_fmt->sizeimage = max((pix->width * pix->height *
+                                  fmt->depth[i]) / 8, plane_fmt->sizeimage);
        }
 }
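
The sizeimage arithmetic used in the hunk above, (width * height * depth[i]) / 8 and never shrinking a value that is already larger, is easy to check by hand. The sketch below is only an illustration: the 1280x720 geometry and the NV12M depth values {8, 4} are assumptions for the example, not values taken from this patch.

/* sizeimage sketch: per-plane payload for an assumed 1280x720 NV12M frame */
#include <stdio.h>

int main(void)
{
	unsigned int width = 1280, height = 720;	/* assumed geometry */
	unsigned int depth[2] = { 8, 4 };		/* NV12M: Y plane, CbCr plane */
	unsigned int reported[2] = { 0, 0 };		/* sizeimage values already set */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int calc = width * height * depth[i] / 8;
		unsigned int sizeimage = calc > reported[i] ? calc : reported[i];

		/* plane 0: 921600 bytes, plane 1: 460800 bytes */
		printf("plane %u: %u bytes\n", i, sizeimage);
	}
	return 0;
}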
 
index 400d701aef04126d8d3b469a51b7a9adb360985c..74ff310db30cd6755393b5b6eaf2a933495fa0e9 100644 (file)
@@ -451,34 +451,44 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
 static int fimc_lite_open(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        set_bit(ST_FLITE_IN_USE, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               goto done;
 
-       if (++fimc->ref_count != 1 || fimc->out_path != FIMC_IO_DMA)
-               return ret;
+       ret = v4l2_fh_open(file);
+       if (ret < 0)
+               goto done;
 
-       ret = fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity,
-                                      true);
-       if (ret < 0) {
-               v4l2_err(fimc->vfd, "Video pipeline initialization failed\n");
-               pm_runtime_put_sync(&fimc->pdev->dev);
-               fimc->ref_count--;
-               v4l2_fh_release(file);
-               clear_bit(ST_FLITE_IN_USE, &fimc->state);
-       }
+       if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+               ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                              &fimc->vfd->entity, true);
+               if (ret < 0) {
+                       pm_runtime_put_sync(&fimc->pdev->dev);
+                       fimc->ref_count--;
+                       v4l2_fh_release(file);
+                       clear_bit(ST_FLITE_IN_USE, &fimc->state);
+               }
 
-       fimc_lite_clear_event_counters(fimc);
+               fimc_lite_clear_event_counters(fimc);
+       }
+done:
+       mutex_unlock(&fimc->lock);
        return ret;
 }
 
 static int fimc_lite_close(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
                clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -492,20 +502,39 @@ static int fimc_lite_close(struct file *file)
        if (fimc->ref_count == 0)
                vb2_queue_release(&fimc->vb_queue);
 
-       return v4l2_fh_release(file);
+       ret = v4l2_fh_release(file);
+
+       mutex_unlock(&fimc->lock);
+       return ret;
 }
 
 static unsigned int fimc_lite_poll(struct file *file,
                                   struct poll_table_struct *wait)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_poll(&fimc->vb_queue, file, wait);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return POLL_ERR;
+
+       ret = vb2_poll(&fimc->vb_queue, file, wait);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_mmap(&fimc->vb_queue, vma);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
+
+       ret = vb2_mmap(&fimc->vb_queue, vma);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static const struct v4l2_file_operations fimc_lite_fops = {
@@ -762,7 +791,9 @@ static int fimc_lite_streamon(struct file *file, void *priv,
        if (fimc_lite_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        ret = fimc_pipeline_validate(fimc);
        if (ret) {
@@ -1508,7 +1539,7 @@ static int fimc_lite_suspend(struct device *dev)
                return 0;
 
        ret = fimc_lite_stop_capture(fimc, suspend);
-       if (ret)
+       if (ret < 0 || !fimc_lite_active(fimc))
                return ret;
 
        return fimc_pipeline_shutdown(&fimc->pipeline);
index 6753c45631b856e1a06d19492e56e97edf01f9f6..52cef4865423ef2be451df098d0c2ada3412fe21 100644 (file)
@@ -193,9 +193,13 @@ int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
 
 int fimc_pipeline_shutdown(struct fimc_pipeline *p)
 {
-       struct media_entity *me = &p->subdevs[IDX_SENSOR]->entity;
+       struct media_entity *me;
        int ret;
 
+       if (!p || !p->subdevs[IDX_SENSOR])
+               return -EINVAL;
+
+       me = &p->subdevs[IDX_SENSOR]->entity;
        mutex_lock(&me->parent->graph_mutex);
        ret = __fimc_pipeline_shutdown(p);
        mutex_unlock(&me->parent->graph_mutex);
@@ -498,12 +502,12 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
  * @source: the source entity to create links to all fimc entities from
  * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
  * @pad: the source entity pad index
- * @fimc_id: index of the fimc device for which link should be enabled
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
  */
 static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                                            struct media_entity *source,
                                            struct v4l2_subdev *sensor,
-                                           int pad, int fimc_id)
+                                           int pad, int link_mask)
 {
        struct fimc_sensor_info *s_info;
        struct media_entity *sink;
@@ -520,7 +524,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc[i]->variant->has_cam_if)
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
 
                sink = &fmd->fimc[i]->vid_cap.subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -552,7 +556,10 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc_lite[i])
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+                       flags = MEDIA_LNK_FL_ENABLED;
+               else
+                       flags = 0;
 
                sink = &fmd->fimc_lite[i]->subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -614,9 +621,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
        struct s5p_fimc_isp_info *pdata;
        struct fimc_sensor_info *s_info;
        struct media_entity *source, *sink;
-       int i, pad, fimc_id = 0;
-       int ret = 0;
-       u32 flags;
+       int i, pad, fimc_id = 0, ret = 0;
+       u32 flags, link_mask = 0;
 
        for (i = 0; i < fmd->num_sensors; i++) {
                if (fmd->sensor[i].subdev == NULL)
@@ -668,19 +674,20 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                if (source == NULL)
                        continue;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
-       fimc_id = 0;
        for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
                if (fmd->csis[i].sd == NULL)
                        continue;
                source = &fmd->csis[i].sd->entity;
                pad = CSIS_PAD_SOURCE;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
        /* Create immutable links between each FIMC's subdev and video node */
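
Taken together, the hunks above replace a single enabled-link index with a bitmask, so each source entity can select its enabled sink by bit position. A compact illustration (the FIMC_MAX_DEVS value of 4 is an assumption for the example, not something stated in this diff):

/*
 * link_mask as interpreted by __fimc_md_create_fimc_sink_links():
 *   bit i                   -> enable the link to the FIMC.i capture entity
 *   bit (i + FIMC_MAX_DEVS) -> enable the link to the FIMC-LITE.i entity
 */
u32 link_mask = (1 << 1) | (1 << (0 + 4));	/* FIMC.1 + FIMC-LITE.0 = 0x12, assuming FIMC_MAX_DEVS == 4 */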
@@ -734,8 +741,8 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
 }
 
 static int __fimc_md_set_camclk(struct fimc_md *fmd,
-                                        struct fimc_sensor_info *s_info,
-                                        bool on)
+                               struct fimc_sensor_info *s_info,
+                               bool on)
 {
        struct s5p_fimc_isp_info *pdata = s_info->pdata;
        struct fimc_camclk_info *camclk;
@@ -744,12 +751,10 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
        if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
                return -EINVAL;
 
-       if (s_info->clk_on == on)
-               return 0;
        camclk = &fmd->camclk[pdata->clk_id];
 
-       dbg("camclk %d, f: %lu, clk: %p, on: %d",
-           pdata->clk_id, pdata->clk_frequency, camclk, on);
+       dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+           pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
 
        if (on) {
                if (camclk->use_count > 0 &&
@@ -760,11 +765,9 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
                        clk_set_rate(camclk->clock, pdata->clk_frequency);
                        camclk->frequency = pdata->clk_frequency;
                        ret = clk_enable(camclk->clock);
+                       dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+                           clk_get_rate(camclk->clock));
                }
-               s_info->clk_on = 1;
-               dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
-                   clk_get_rate(camclk->clock));
-
                return ret;
        }
 
@@ -773,7 +776,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
 
        if (--camclk->use_count == 0) {
                clk_disable(camclk->clock);
-               s_info->clk_on = 0;
                dbg("Disabled camclk %d", pdata->clk_id);
        }
        return ret;
@@ -789,8 +791,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
  * devices to which sensors can be attached, either directly or through
  * the MIPI CSI receiver. The clock is allowed here to be used by
  * multiple sensors concurrently if they use same frequency.
- * The per sensor subdev clk_on attribute helps to synchronize accesses
- * to the sclk_cam clocks from the video and media device nodes.
  * This function should only be called when the graph mutex is held.
  */
 int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
index 3b8a3492a17671fc86658db8eb410861e8cba8f2..1f5dbaff5442a7df686b4e6dc023952b8436939f 100644 (file)
@@ -47,7 +47,6 @@ struct fimc_camclk_info {
  * @pdata: sensor's attributes passed as media device's platform data
  * @subdev: image sensor v4l2 subdev
  * @host: fimc device the sensor is currently linked to
- * @clk_on: sclk_cam clock's state associated with this subdev
  *
  * This data structure applies to image sensor and the writeback subdevs.
  */
@@ -55,7 +54,6 @@ struct fimc_sensor_info {
        struct s5p_fimc_isp_info *pdata;
        struct v4l2_subdev *subdev;
        struct fimc_dev *host;
-       bool clk_on;
 };
 
 /**
index 4dd32fc8fd82c740a8db84853e0e8e3422e6d238..feea867f318c25a6bce189d2bf6e95fb91a42422 100644 (file)
@@ -996,6 +996,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
 
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_dec_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
index 03d83340e7fb6bcaaded17b1da810610a8c40ad7..158b78989b89dc43dd7ced55dec2ca91a6baa28e 100644 (file)
@@ -1773,6 +1773,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
        }
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_enc_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
index e8c93c89265a22e7d4fad6e8e7f59d3511abeca9..9cf5bda35fbe1cfe332e8b18ae3d9218b3e8f54b 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/device.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/v4l2-mediabus.h>
index e129c820df7da7d6430e62891558f92cdbe235ab..92144ed1ad469d8257eab6636c4d7f8cc4c65180 100644 (file)
@@ -286,6 +286,7 @@ config TWL6040_CORE
        depends on I2C=y && GENERIC_HARDIRQS
        select MFD_CORE
        select REGMAP_I2C
+       select IRQ_DOMAIN
        default n
        help
          Say yes here if you want support for Texas Instruments TWL6040 audio
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
deleted file mode 100644 (file)
index 63b30b1..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Shared definitions and data structures for the AB5500 MFD driver
- */
-
-/* Read/write operation values. */
-#define AB5500_PERM_RD (0x01)
-#define AB5500_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB5500_PERM_RO (AB5500_PERM_RD)
-#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
-
-#define AB5500_MASK_BASE (0x60)
-#define AB5500_MASK_END (0x79)
-#define AB5500_CHIP_ID (0x20)
-
-/**
- * struct ab5500_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab5500_reg_range {
-       u8 first;
-       u8 last;
-       u8 perm;
-};
-
-/**
- * struct ab5500_i2c_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_ranges {
-       u8 nranges;
-       u8 bankid;
-       const struct ab5500_reg_range *range;
-};
-
-/**
- * struct ab5500_i2c_banks
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_banks {
-       u8 nbanks;
-       const struct ab5500_i2c_ranges *bank;
-};
-
-/**
- * struct ab5500_bank
- * @slave_addr: I2C slave_addr found in AB5500 specification
- * @name: Documentation name of the bank. For reference
- */
-struct ab5500_bank {
-       u8 slave_addr;
-       const char *name;
-};
-
-static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
-       [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
-               AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
-       [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
-               AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
-       [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
-       [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
-       [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
-       [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
-       [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
-       [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
-       [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
-       [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
-       [AB5500_BANK_FG_BATTCOM_ACC] = {
-               AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
-       [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
-       [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
-       [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
-       [AB5500_BANK_AUDIO_HEADSETUSB] = {
-               AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
-};
-
-int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
-       u8 *value);
-int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
-       u8 reg, u8 bitmask, u8 bitvalues);
index 3fcdab3eb8eb67fb02a08c02166613e11742924c..03df422feb763ab075ac5fa741ef09b71c066d1d 100644 (file)
@@ -49,10 +49,72 @@ static struct regmap_config mc13xxx_regmap_spi_config = {
        .reg_bits = 7,
        .pad_bits = 1,
        .val_bits = 24,
+       .write_flag_mask = 0x80,
 
        .max_register = MC13XXX_NUMREGS,
 
        .cache_type = REGCACHE_NONE,
+       .use_single_rw = 1,
+};
+
+static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
+                               void *val, size_t val_size)
+{
+       unsigned char w[4] = { *((unsigned char *) reg), 0, 0, 0};
+       unsigned char r[4];
+       unsigned char *p = val;
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+       struct spi_transfer t = {
+               .tx_buf = w,
+               .rx_buf = r,
+               .len = 4,
+       };
+
+       struct spi_message m;
+       int ret;
+
+       if (val_size != 3 || reg_size != 1)
+               return -ENOTSUPP;
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t, &m);
+       ret = spi_sync(spi, &m);
+
+       memcpy(p, &r[1], 3);
+
+       return ret;
+}
+
+static int mc13xxx_spi_write(void *context, const void *data, size_t count)
+{
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+
+       if (count != 4)
+               return -ENOTSUPP;
+
+       return spi_write(spi, data, count);
+}
+
+/*
+ * We cannot use the generic regmap-spi bus implementation here.
+ * The MC13783 chip will get corrupted if the CS signal is deasserted,
+ * and on the i.MX31 SoC (the target SoC for the MC13783 PMIC) the SPI
+ * controller has the following erratum (DSPhl22960):
+ * "The CSPI negates SS when the FIFO becomes empty with
+ * SSCTL= 0. Software cannot guarantee that the FIFO will not
+ * drain because of higher priority interrupts and the
+ * non-realtime characteristics of the operating system. As a
+ * result, the SS will negate before all of the data has been
+ * transferred to/from the peripheral."
+ * We work around this by accessing the SPI controller with a
+ * single transfer.
+ */
+
+static struct regmap_bus regmap_mc13xxx_bus = {
+       .write = mc13xxx_spi_write,
+       .read = mc13xxx_spi_read,
 };
 
 static int mc13xxx_spi_probe(struct spi_device *spi)
@@ -73,12 +135,13 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
 
        dev_set_drvdata(&spi->dev, mc13xxx);
        spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
-       spi->bits_per_word = 32;
 
        mc13xxx->dev = &spi->dev;
        mutex_init(&mc13xxx->lock);
 
-       mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+       mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
+                                       &mc13xxx_regmap_spi_config);
+
        if (IS_ERR(mc13xxx->regmap)) {
                ret = PTR_ERR(mc13xxx->regmap);
                dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
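
With the custom bus registered through regmap_init() above, callers still use the ordinary regmap API and every access becomes one 4-byte full-duplex SPI transfer. A minimal sketch of a read path (the wrapper name and the 24-bit masking step are assumptions for illustration, not code from this patch):

#include <linux/regmap.h>

/* Hedged sketch: read one 24-bit MC13xxx register through the custom SPI bus. */
static int mc13xxx_sketch_reg_read(struct regmap *map, unsigned int offset, u32 *val)
{
	unsigned int tmp;
	int ret;

	ret = regmap_read(map, offset, &tmp);	/* serviced by mc13xxx_spi_read() */
	if (ret)
		return ret;

	*val = tmp & 0xffffff;			/* the device uses .val_bits = 24 */
	return 0;
}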
index 7e96bb2297244f7946537a5da5f78226e6a4db40..41088ecbb2a92e3c6350038ad61f77ceb500e8ce 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
+#include <linux/gpio.h>
 #include <plat/cpu.h>
 #include <plat/usb.h>
 #include <linux/pm_runtime.h>
@@ -500,8 +501,21 @@ static void omap_usbhs_init(struct device *dev)
        dev_dbg(dev, "starting TI HSUSB Controller\n");
 
        pm_runtime_get_sync(dev);
-       spin_lock_irqsave(&omap->lock, flags);
 
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+               /* Hold the PHY in RESET for enough time till DIR is high */
+               udelay(10);
+       }
+
+       spin_lock_irqsave(&omap->lock, flags);
        omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
        dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
@@ -581,9 +595,39 @@ static void omap_usbhs_init(struct device *dev)
        }
 
        spin_unlock_irqrestore(&omap->lock, flags);
+
+       if (pdata->ehci_data->phy_reset) {
+               /* Hold the PHY in RESET for enough time till
+                * PHY is settled and ready
+                */
+               udelay(10);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[0], 1);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[1], 1);
+       }
+
        pm_runtime_put_sync(dev);
 }
 
+static void omap_usbhs_deinit(struct device *dev)
+{
+       struct usbhs_hcd_omap           *omap = dev_get_drvdata(dev);
+       struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+       }
+}
+
 
 /**
  * usbhs_omap_probe - initialize TI-based HCDs
@@ -767,6 +811,7 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
        goto end_probe;
 
 err_alloc:
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
 
 err_tll:
@@ -818,6 +863,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
 {
        struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
 
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
        iounmap(omap->uhh_base);
        clk_put(omap->init_60m_fclk);
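
The PHY reset handling added above follows a common pulse pattern: claim the line driven low, wait, raise it once the controller is programmed, and free it again on teardown. A condensed sketch of just that pattern (the GPIO parameter and label are placeholders, not values from this diff):

#include <linux/gpio.h>
#include <linux/delay.h>

static int sketch_phy_reset_pulse(int reset_gpio)
{
	int ret;

	if (!gpio_is_valid(reset_gpio))
		return -EINVAL;

	ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_LOW, "PHY reset");
	if (ret)
		return ret;

	udelay(10);				/* hold the PHY in reset */
	gpio_set_value_cansleep(reset_gpio, 1);	/* release the PHY from reset */

	gpio_free(reset_gpio);			/* or keep it claimed until remove() */
	return 0;
}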
index 00c0aba7eba000de65742b04014419e0115c6eba..c4a69f193a1df1985abfbaeeffb8e39cda933493 100644 (file)
@@ -356,7 +356,14 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                }
        }
 
-       ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+       /* Change IRQ into clear on read mode for efficiency */
+       slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
+       addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
+       reg = PALMAS_INT_CTRL_INT_CLEAR;
+
+       regmap_write(palmas->regmap[slave], addr, reg);
+
+       ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
                        IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
                        &palmas->irq_data);
        if (ret < 0)
@@ -441,6 +448,9 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                goto err;
        }
 
+       children[PALMAS_PMIC_ID].platform_data = pdata->pmic_pdata;
+       children[PALMAS_PMIC_ID].pdata_size = sizeof(*pdata->pmic_pdata);
+
        ret = mfd_add_devices(palmas->dev, -1,
                              children, ARRAY_SIZE(palmas_children),
                              NULL, regmap_irq_chip_get_base(palmas->irq_data));
@@ -472,6 +482,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
        { "twl6035", },
        { "twl6037", },
        { "tps65913", },
+       { /* end */ }
 };
 MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
 
index 7de13891e49e8f2c788ad2e7ca274f257e1c41f5..783fcd7365bc1e770739db884bba4a5088479e22 100644 (file)
@@ -1147,7 +1147,7 @@ static int mei_pci_resume(struct device *device)
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
-                       0, mei_driver_name, dev);
+                       IRQF_ONESHOT, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
index 17bbacb1b4b131b119b0076b339f44012fcc51f7..87b251ab6ec582f2c8177687d0b330d97110fb50 100644 (file)
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
                if (msg->activate_gru_mq_desc_gpa !=
                    part_uv->activate_gru_mq_desc_gpa) {
-                       spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                       spin_lock(&part_uv->flags_lock);
                        part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
-                       spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                       spin_unlock(&part_uv->flags_lock);
                        part_uv->activate_gru_mq_desc_gpa =
                            msg->activate_gru_mq_desc_gpa;
                }
index f13e38deceac760fcbd9ae4cd2a1fdf7e5d79671..8f5dc08d65989d8526f8bc59d1ed91426e93108d 100644 (file)
@@ -50,8 +50,8 @@ int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
                goto egpioreq;
 
        ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
-                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                                  cd->label, host);
+                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                                  IRQF_ONESHOT, cd->label, host);
        if (ret < 0)
                goto eirqreq;
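
Both this hunk and the mei change above converge on the same genirq rule: a threaded interrupt requested with a NULL primary handler must carry IRQF_ONESHOT, otherwise the core rejects the request. A minimal sketch of the pattern (the handler and device names are placeholders, not from this diff):

#include <linux/interrupt.h>

static irqreturn_t sketch_cd_thread(int irq, void *dev_id)
{
	/* sleepable card-detect work would go here */
	return IRQ_HANDLED;
}

static int sketch_cd_request(int irq, void *dev_id)
{
	return request_threaded_irq(irq, NULL, sketch_cd_thread,
				    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
				    IRQF_ONESHOT, "sketch-cd", dev_id);
}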
 
index 258b203397aa88a381a9e4f007eacf77b9452aa3..4f4489aa6baede795ae21c222e0aa9e99e7b5800 100644 (file)
@@ -717,10 +717,6 @@ static int mmc_select_powerclass(struct mmc_card *card,
                                 card->ext_csd.generic_cmd6_time);
        }
 
-       if (err)
-               pr_err("%s: power class selection for ext_csd_bus_width %d"
-                      " failed\n", mmc_hostname(card->host), bus_width);
-
        return err;
 }
 
@@ -1104,7 +1100,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
                err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
                if (err)
-                       goto err;
+                       pr_warning("%s: power class selection to bus width %d"
+                                  " failed\n", mmc_hostname(card->host),
+                                  1 << bus_width);
        }
 
        /*
@@ -1136,7 +1134,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1164,7 +1165,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d ddr %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width, ddr);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
index 41371ba1a8117c87aaaf85b63383e60bda6303d6..f3f6cfedd69eb5e1367c196de7e8e249740bb77b 100644 (file)
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 static int cafe_device_ready(struct mtd_info *mtd)
 {
        struct cafe_priv *cafe = mtd->priv;
-       int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+       int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
        uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
 
        cafe_writel(cafe, irqs, NAND_IRQ);
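
The one-character fix above matters because OR-ing in the ready bit can never yield zero, so the old expression always reported the chip as ready. A tiny illustration with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned int status = 0x00000000;		/* device busy: bit 30 clear */

	printf("old: %d\n", !!(status | 0x40000000));	/* prints 1, always "ready" */
	printf("new: %d\n", !!(status & 0x40000000));	/* prints 0, correctly "busy" */
	return 0;
}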
index a05b7b444d4f1f8a92d6f0e10a2ee1db19d3c722..a6cad5caba788fe8b665270a3fc7494d7a071887 100644 (file)
@@ -920,12 +920,12 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                 */
                memset(chip->oob_poi, ~0, mtd->oobsize);
                chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
-
-               read_page_swap_end(this, buf, mtd->writesize,
-                               this->payload_virt, this->payload_phys,
-                               nfc_geo->payload_size,
-                               payload_virt, payload_phys);
        }
+
+       read_page_swap_end(this, buf, mtd->writesize,
+                       this->payload_virt, this->payload_phys,
+                       nfc_geo->payload_size,
+                       payload_virt, payload_phys);
 exit_nfc:
        return ret;
 }
index c58e6a93f44501d68056d46ea40dc0373d9a51a7..6acc790c2fbb96880ec29642a1d2e7e2528dbee9 100644 (file)
@@ -273,6 +273,26 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
 
 static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
 
+static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
+{
+       int i;
+       u32 *t = trg;
+       const __iomem u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               *t++ = __raw_readl(s++);
+}
+
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+       int i;
+       u32 __iomem *t = trg;
+       const u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               __raw_writel(*s++, t++);
+}
+
 static int check_int_v3(struct mxc_nand_host *host)
 {
        uint32_t tmp;
@@ -519,7 +539,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
 
        wait_op_done(host, true);
 
-       memcpy_fromio(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 }
 
 /* Request the NANDFC to perform a read of the NAND device ID. */
@@ -535,7 +555,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
        /* Wait for operation to complete */
        wait_op_done(host, true);
 
-       memcpy_fromio(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 
        if (this->options & NAND_BUSWIDTH_16) {
                /* compress the ID info */
@@ -797,16 +817,16 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
 
        if (bfrom) {
                for (i = 0; i < n - 1; i++)
-                       memcpy_fromio(d + i * j, s + i * t, j);
+                       memcpy32_fromio(d + i * j, s + i * t, j);
 
                /* the last section */
-               memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
+               memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
        } else {
                for (i = 0; i < n - 1; i++)
-                       memcpy_toio(&s[i * t], &d[i * j], j);
+                       memcpy32_toio(&s[i * t], &d[i * j], j);
 
                /* the last section */
-               memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+               memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
        }
 }
 
@@ -1070,7 +1090,8 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
 
                host->devtype_data->send_page(mtd, NFC_OUTPUT);
 
-               memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
+               memcpy32_fromio(host->data_buf, host->main_area0,
+                               mtd->writesize);
                copy_spare(mtd, true);
                break;
 
@@ -1086,7 +1107,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
                break;
 
        case NAND_CMD_PAGEPROG:
-               memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
+               memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
                copy_spare(mtd, false);
                host->devtype_data->send_page(mtd, NFC_INPUT);
                host->devtype_data->send_cmd(host, command, true);
index d47586cf64ce4af2c802f8783869af8b96ccc0a4..a11253a0fcabd6ef7362b9fcae4972ba06fc4966 100644 (file)
@@ -3501,6 +3501,13 @@ int nand_scan_tail(struct mtd_info *mtd)
        /* propagate ecc info to mtd_info */
        mtd->ecclayout = chip->ecc.layout;
        mtd->ecc_strength = chip->ecc.strength;
+       /*
+        * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+        * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+        * properly set.
+        */
+       if (!mtd->bitflip_threshold)
+               mtd->bitflip_threshold = mtd->ecc_strength;
 
        /* Check, if we should skip the bad block table scan */
        if (chip->options & NAND_SKIP_BBTSCAN)
index 6cc8fbfabb8e2b71d50db215222d380f9e0061a1..cf0cd3146817f9275706f1d4f61b53724d0d5dc1 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -546,12 +546,6 @@ static char *get_partition_name(int i)
        return kstrdup(buf, GFP_KERNEL);
 }
 
-static uint64_t divide(uint64_t n, uint32_t d)
-{
-       do_div(n, d);
-       return n;
-}
-
 /*
  * Initialize the nandsim structure.
  *
@@ -580,7 +574,7 @@ static int init_nandsim(struct mtd_info *mtd)
        ns->geom.oobsz    = mtd->oobsize;
        ns->geom.secsz    = mtd->erasesize;
        ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
-       ns->geom.pgnum    = divide(ns->geom.totsz, ns->geom.pgsz);
+       ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
        ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
        ns->geom.secshift = ffs(ns->geom.secsz) - 1;
        ns->geom.pgshift  = chip->page_shift;
@@ -921,7 +915,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
 
        if (!rptwear)
                return 0;
-       wear_eb_count = divide(mtd->size, mtd->erasesize);
+       wear_eb_count = div_u64(mtd->size, mtd->erasesize);
        mem = wear_eb_count * sizeof(unsigned long);
        if (mem / sizeof(unsigned long) != wear_eb_count) {
                NS_ERR("Too many erase blocks for wear reporting\n");
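
div_u64() from <linux/math64.h> returns the 64-bit quotient directly, which is what the removed divide() wrapper emulated on top of do_div(). A small sketch of the semantics (the device geometry is assumed for illustration, not taken from this diff):

#include <linux/math64.h>

static u64 sketch_page_count(void)
{
	u64 totsz = 256ULL * 1024 * 1024;	/* assumed 256 MiB device */
	u32 pgsz = 2048;			/* assumed 2 KiB page size */

	return div_u64(totsz, pgsz);		/* 131072 pages */
}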
index 0741aded9eb057bbc2454a220bd6a19318e975fa..f2db8fca46a13d8b431d977df4efee8a61bc0959 100644 (file)
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-                       if (likely(priv->tx_queue[i]->txcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
-                       }
                }
 
                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-                       }
                }
        }
 }
index 351a4097b2baec09c53ce45cace4c03c1c8dcb47..76edbc1be33b4d1151e72c24c1f621c830c3a039 100644 (file)
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
index 31d37a2b5ba818e1366945b630f8b187ec9fb15b..623e30b9964de29e091f77564ccd53ce8ba30876 100644 (file)
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                             __le16 csum, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        if (status & E1000_RXD_STAT_IXSM)
                return;
 
-       /* TCP/UDP checksum error bit is set */
-       if (errors & E1000_RXD_ERR_TCPE) {
+       /* TCP/UDP checksum error bit or IP checksum error bit is set */
+       if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
 
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status & E1000_RXD_STAT_TCPCS) {
-               /* TCP checksum is good */
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               /*
-                * IP fragment with UDP payload
-                * Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       }
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                skb_put(skb, length);
 
                /* Receive Checksum Offload */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                        }
                }
 
-               /* Receive Checksum Offload XXX recompute due to CRC strip? */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               /* Receive Checksum Offload */
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
-       if (adapter->netdev->features & NETIF_F_RXCSUM) {
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
                rxcsum |= E1000_RXCSUM_TUOFL;
-
-               /*
-                * IPv4 payload checksum for UDP fragments must be
-                * used in conjunction with packet-split.
-                */
-               if (adapter->rx_ps_pages)
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-       } else {
+       else
                rxcsum &= ~E1000_RXCSUM_TUOFL;
-               /* no need to clear IPPCSE as it defaults to 0 */
-       }
        ew32(RXCSUM, rxcsum);
 
        if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * IP payload checksum (enabled with jumbos/packet-split when
-                * Rx checksum is enabled) and generation of RSS hash is
-                * mutually exclusive in the hardware.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) &&
-                   (netdev->features & NETIF_F_RXHASH)) {
-                       e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-                       return -EINVAL;
-               }
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
+               return -EINVAL;
        }
 
        /* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
                         NETIF_F_RXALL)))
                return 0;
 
-       /*
-        * IP payload checksum (enabled with jumbos/packet-split when Rx
-        * checksum is enabled) and generation of RSS hash is mutually
-        * exclusive in the hardware.
-        */
-       if (adapter->rx_ps_pages &&
-           (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-               e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-               return -EINVAL;
-       }
-
        if (changed & NETIF_F_RXFCS) {
                if (features & NETIF_F_RXFCS) {
                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
index 8ce67064b9c5802efe098f702486477b424c273e..90eef07943f4d50bbd027646e170abca5abfb1a3 100644 (file)
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-           ((ec->rx_coalesce_usecs > 3) &&
-            (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-           (ec->rx_coalesce_usecs == 2))
-               return -EINVAL;
-
-       /* convert to rate of irq's per second */
-       if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+       if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+               adapter->requested_itr = 1000000000 /
+                                       (adapter->current_itr * 256);
+       } else if ((ec->rx_coalesce_usecs == 3) ||
+                  (ec->rx_coalesce_usecs == 2)) {
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
-       } else {
-               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+       } else if (ec->rx_coalesce_usecs == 0) {
+               /*
+                * The user's desire is to turn off interrupt throttling
+                * altogether, but due to HW limitations, we can't do that.
+                * Instead we set a very small value in EITR, which would
+                * allow ~967k interrupts per second, but allow the adapter's
+                * internal clocking to still function properly.
+                */
+               adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       }
+       } else
+               return -EINVAL;
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
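
A quick back-of-the-envelope for the conversion above, with an illustrative value rather than one from this diff: rx_coalesce_usecs = 100 gives current_itr = 100 << 2 = 400 and requested_itr = 1000000000 / (400 * 256), roughly 9765 interrupts per second.

#include <stdio.h>

int main(void)
{
	unsigned int usecs = 100;				/* illustrative input only */
	unsigned int current_itr = usecs << 2;			/* 400 */
	unsigned int irq_rate = 1000000000u / (current_itr * 256);

	printf("~%u interrupts/s\n", irq_rate);			/* ~9765 */
	return 0;
}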
index d614c374ed9d2ada5b6923e288528b8201a09807..3b5c4571b55e3c922a4b6e5a94a0a4a8adcb2fce 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
index 3767a122586043512cd6ded88b562fd9b97df88f..b01960fcfbc91110ede59a5e18f9fac94a912bc3 100644 (file)
@@ -197,6 +197,10 @@ err:
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
+
+       /* can be called while disconnecting */
+       if (!dev)
+               return 0;
        return qmi_wwan_manage_power(dev, on);
 }
 
index c54b7d37bff159f1fc8a7482a4d8bdfe806b9beb..420d69b2674ceae1282fcc9d8cbbc38d8c5d00af 100644 (file)
@@ -143,6 +143,7 @@ struct ath_common {
        u32 keymax;
        DECLARE_BITMAP(keymap, ATH_KEYMAX);
        DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+       DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
        enum ath_crypt_caps crypt_caps;
 
        unsigned int clockrate;
index 1c68e564f503197c9642ba1cae5e9633325fbe8c..995ca8e1302efc7562ff9905d8e91d00ffb25f74 100644 (file)
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
                     !ah->is_pciexpress)) {
                        ah->config.serialize_regmode =
                                SER_REG_MODE_ON;
index e1fcc68124dc3bc78359cf72e2da459ddf9c4560..0735aeb3b26cefcd15c2847a1775718d36cecbc6 100644 (file)
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
                        __skb_unlink(skb, &rx_edma->rx_fifo);
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
-               } else {
-                       bf = NULL;
                }
+
+               bf = NULL;
        }
 
        *dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
         * descriptor does contain a valid key index. This has been observed
         * mostly with CCMP encryption.
         */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
        if (!rx_stats->rs_datalen) {
index 0e81904956cf1c04fa37ad01e65282b55e2bf8f9..5c54aa43ca2d81b3936d917aeb68f7331aabb34b 100644 (file)
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
                return -EIO;
 
        set_bit(idx, common->keymap);
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               set_bit(idx, common->ccmp_keymap);
+
        if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                set_bit(idx + 64, common->keymap);
                set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
                return;
 
        clear_bit(key->hw_key_idx, common->keymap);
+       clear_bit(key->hw_key_idx, common->ccmp_keymap);
        if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
                return;
 
index 3ee23134c02b9b96ee184a5999e9577bf6452511..013680332f075be85a9035bcec33eaec0c2e9285 100644 (file)
@@ -796,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
        switch (op) {
        case ADD:
                ret = iwlagn_mac_sta_add(hw, vif, sta);
+               if (ret)
+                       break;
+               /*
+                * Clear the in-progress flag, the AP station entry was added
+                * but we'll initialize LQ only when we've associated (which
+                * would also clear the in-progress flag). This is necessary
+                * in case we never initialize LQ because association fails.
+                */
+               spin_lock_bh(&priv->sta_lock);
+               priv->stations[iwl_sta_id(sta)].used &=
+                       ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_bh(&priv->sta_lock);
                break;
        case REMOVE:
                ret = iwlagn_mac_sta_remove(hw, vif, sta);
index 9c44088054dd90bd967626d8416e63c083225fc8..900ee129e825987dfb2dbe6b485bfa0bb40dbc74 100644 (file)
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        else
                last_seq = priv->rx_seq[tid];
 
-       if (last_seq >= new_node->start_win)
+       if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+           last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-       memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+       mwifiex_reset_11n_rx_seq_num(priv);
 }
index f1bffebabc60a6ab1059276664bc3f3f71e129bb..6c9815a0f5d8b0d7aebcb5d6a9953a24819ad45a 100644 (file)
 
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+       memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
 int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
                               u16 seqNum,
                               u16 tid, u8 *ta,
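
The helper above leans on a small detail that is easy to miss: each rx_seq[] entry is a 16-bit sequence number, so filling the array with the byte 0xff leaves every entry at 0xffff, i.e. MWIFIEX_DEF_11N_RX_SEQ_NUM, the "nothing received yet" sentinel tested in mwifiex_11n_create_rx_reorder_tbl(). A standalone illustration (the array size here is arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint16_t rx_seq[8];			/* stand-in for priv->rx_seq[] */

	memset(rx_seq, 0xff, sizeof(rx_seq));
	printf("0x%04x\n", rx_seq[0]);		/* 0xffff == MWIFIEX_DEF_11N_RX_SEQ_NUM */
	return 0;
}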
index ceb82cd749cc7ba1ca2054ae0b675802d2194190..383820a52beba0d62428614131adbe42def1e5db 100644 (file)
@@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
                /* save assoc resp ie index after auto-indexing */
                *assoc_idx = *((u16 *)pos);
 
+       kfree(ap_custom_ie);
        return ret;
 }
 
index e0377473282f05fab01835578cb9b2579a40217f..fc8a9bfa1248305fababa37b59ca90110a57afaa 100644 (file)
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
                adapter->event_cause = *(u32 *) skb->data;
 
-               skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
-                       memcpy(adapter->event_body, skb->data, skb->len);
+                       memcpy(adapter->event_body,
+                              skb->data + MWIFIEX_EVENT_HEADER_LEN,
+                              skb->len);
 
                /* event cause has been saved to adapter->event_cause */
                adapter->event_received = true;
index 4ace5a3dcd237c5aada48223e224a7f943ef2692..11e731f3581c2dbf4856d8676ca6ee66c12ccabe 100644 (file)
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_UAP_STA_ASSOC:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
                        len = -1;
 
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                 GFP_KERNEL);
                break;
        case EVENT_UAP_STA_DEAUTH:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
-                                GFP_KERNEL);
+               cfg80211_del_sta(priv->netdev, adapter->event_body +
+                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
index 49ebf20c56ebc0ac98dc8d5b25bc3a1164847020..22a5916564b84099576e1c554b37d1fcbcd420b2 100644 (file)
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        struct device *dev = adapter->dev;
        u32 recv_type;
        __le32 tmp;
+       int ret;
 
        if (adapter->hs_activated)
                mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_CMD:
                        if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
                                dev_err(dev, "CMD: skb->len too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        } else if (!adapter->curr_cmd) {
                                dev_dbg(dev, "CMD: no curr_cmd\n");
                                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                                        mwifiex_process_sleep_confirm_resp(
                                                        adapter, skb->data,
                                                        skb->len);
-                                       return 0;
+                                       ret = 0;
+                                       goto exit_restore_skb;
                                }
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
                        adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_EVENT:
                        if (skb->len < sizeof(u32)) {
                                dev_err(dev, "EVENT: skb->len too small\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
                        skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
                        adapter->event_cause = le32_to_cpu(tmp);
-                       skb_pull(skb, sizeof(u32));
                        dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
 
                        if (skb->len > MAX_EVENT_SIZE) {
                                dev_err(dev, "EVENT: event body too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
-                       skb_copy_from_linear_data(skb, adapter->event_body,
-                                                 skb->len);
+                       memcpy(adapter->event_body, skb->data +
+                              MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
                        adapter->event_received = true;
                        adapter->event_skb = skb;
                        break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        }
 
        return -EINPROGRESS;
+
+exit_restore_skb:
+       /* The buffer will be reused for further cmds/events */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       return ret;
 }
 
 static void mwifiex_usb_rx_complete(struct urb *urb)
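
For reference, the error paths introduced above all funnel into exit_restore_skb, which pushes the interface header back onto the skb so the same buffer can be reused for later commands and events. Below is a minimal, self-contained sketch of that pull-then-restore-on-error pattern in kernel-style C; the names demo_parse and hdr_len are hypothetical and not part of this patch.

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Hypothetical parser illustrating the restore-on-error pattern. */
static int demo_parse(struct sk_buff *skb, unsigned int hdr_len)
{
        int ret;

        if (!pskb_may_pull(skb, hdr_len))
                return -EINVAL;

        skb_pull(skb, hdr_len);         /* expose the payload */

        if (!skb->len) {
                /* Nothing to consume: undo the pull so the skb can be reused. */
                ret = -EAGAIN;
                goto exit_restore_skb;
        }

        return 0;

exit_restore_skb:
        skb_push(skb, hdr_len);         /* give the header back for reuse */
        return ret;
}
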
index f3fc6551585780b08724bb8112457caafeef220e..3fa4d417699381225e853a56238e0d8506a2f99b 100644 (file)
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
                priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
 
+               mwifiex_reset_11n_rx_seq_num(priv);
+
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        if (!ptr->is_11n_enabled ||
            mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+           priv->wps.session_enable ||
            ((priv->sec_info.wpa_enabled ||
              priv->sec_info.wpa2_enabled) &&
             !priv->wpa_is_gtk_set)) {
index d228358e6a403b9c8eee56d63ce4991058975dc9..9970c2b1b19979dc3577472c7c21e27703a38a76 100644 (file)
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
        {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
        {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
        {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
index 54156b0b5c2d7defe6b42791d022d4c982fd945f..d7b907e67170348e70302e6a1cfe70642a8c6f7e 100644 (file)
@@ -1,7 +1,6 @@
 config WLCORE
        tristate "TI wlcore support"
        depends on WL_TI && GENERIC_HARDIRQS && MAC80211
-       depends on INET
        select FW_LOADER
        ---help---
          This module contains the main code for TI WLAN chips.  It abstracts
index 343ad29e211c66768491e325046ff0b58bcb15ec..e44f8c2d239d253afc045164834f0476b94cf932 100644 (file)
@@ -317,10 +317,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
        for(; lookup->compatible != NULL; lookup++) {
                if (!of_device_is_compatible(np, lookup->compatible))
                        continue;
-               if (of_address_to_resource(np, 0, &res))
-                       continue;
-               if (res.start != lookup->phys_addr)
-                       continue;
+               if (!of_address_to_resource(np, 0, &res))
+                       if (res.start != lookup->phys_addr)
+                               continue;
                pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
                return lookup;
        }
@@ -462,4 +461,5 @@ int of_platform_populate(struct device_node *root,
        of_node_put(root);
        return rc;
 }
+EXPORT_SYMBOL_GPL(of_platform_populate);
 #endif /* CONFIG_OF_ADDRESS */
index bf0cee629b60f8bb763b4407f631e7ba7e778aa8..099f46cd8e87a4ca823dc24f707458857f26eaa5 100644 (file)
@@ -748,6 +748,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        pci_pm_set_unknown_state(pci_dev);
 
+       /*
+        * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+        * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+        * hasn't been quiesced and tries to turn it off.  If the controller
+        * is already in D3, this can hang or cause memory corruption.
+        *
+        * Since the value of the COMMAND register doesn't matter once the
+        * device has been suspended, we can safely set it to 0 here.
+        */
+       if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+               pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
        return 0;
 }
 
index 77cb54a65cde10885d7b0b4406ae103faca9e8a2..447e83472c01558705d0685f7fd6af17a6f8deef 100644 (file)
@@ -1744,11 +1744,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
        if (target_state == PCI_POWER_ERROR)
                return -EIO;
 
-       /* Some devices mustn't be in D3 during system sleep */
-       if (target_state == PCI_D3hot &&
-                       (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
-               return 0;
-
        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
 
        error = pci_set_power_state(dev, target_state);
index 194b243a281782864c0e6284fdcc23546e9f15b1..2a75216775410df88e19fd9faa21e0045c16d15d 100644 (file)
@@ -2929,32 +2929,6 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
-/*
- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
- * ASUS motherboards will cause memory corruption or a system crash
- * if they are in D3 while the system is put into S3 sleep.
- */
-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
-{
-       const char *sys_info;
-       static const char good_Asus_board[] = "P8Z68-V";
-
-       if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
-               return;
-       if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
-               return;
-       sys_info = dmi_get_system_info(DMI_BOARD_NAME);
-       if (sys_info && memcmp(sys_info, good_Asus_board,
-                       sizeof(good_Asus_board) - 1) == 0)
-               return;
-
-       dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
-       dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
-       device_set_wakeup_capable(&dev->dev, false);
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
-
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
                          struct pci_fixup *end)
 {
index 8d81bafcb721b087048e2c0095871ea90ad8bf7d..08ccf03f1dd872ed3d4a573479a7b766f1ef7e90 100644 (file)
@@ -2636,9 +2636,12 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
 {
        struct regulator_dev *rdev = regulator->rdev;
        struct regulator *consumer;
-       int ret, output_uV, input_uV, total_uA_load = 0;
+       int ret, output_uV, input_uV = 0, total_uA_load = 0;
        unsigned int mode;
 
+       if (rdev->supply)
+               input_uV = regulator_get_voltage(rdev->supply);
+
        mutex_lock(&rdev->mutex);
 
        /*
@@ -2671,10 +2674,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
                goto out;
        }
 
-       /* get input voltage */
-       input_uV = 0;
-       if (rdev->supply)
-               input_uV = regulator_get_voltage(rdev->supply);
+       /* No supply? Use constraint voltage */
        if (input_uV <= 0)
                input_uV = rdev->constraints->input_uV;
        if (input_uV <= 0) {
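
The regulator hunk above moves the regulator_get_voltage(rdev->supply) query ahead of mutex_lock(&rdev->mutex); one plausible reading is that the call into the supply should not run while the consumer's own mutex is held. A minimal sketch of that ordering follows, with hypothetical demo_* types standing in for the real regulator core structures.

#include <linux/mutex.h>

/* Hypothetical stand-ins for the regulator core types. */
struct demo_regulator {
        struct mutex lock;
        struct demo_regulator *supply;
        int voltage_uV;
};

/* Stand-in for regulator_get_voltage(); takes the supply's own lock. */
static int demo_get_voltage(struct demo_regulator *reg)
{
        int uV;

        mutex_lock(&reg->lock);
        uV = reg->voltage_uV;
        mutex_unlock(&reg->lock);

        return uV;
}

/* Query the supply first, then take our own lock to pick an operating mode. */
static int demo_set_load(struct demo_regulator *reg, int uA_load)
{
        int input_uV = 0;

        if (reg->supply)
                input_uV = demo_get_voltage(reg->supply);

        mutex_lock(&reg->lock);
        /* ... choose an operating mode from input_uV and uA_load ... */
        mutex_unlock(&reg->lock);

        return 0;
}
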
index 24d880e78ec6704e8b84e6d0f6547602f1f639bb..f8d818abf98caf4dc8275a65e18695c41c639537 100644 (file)
@@ -4,9 +4,11 @@ menu "Remoteproc drivers (EXPERIMENTAL)"
 config REMOTEPROC
        tristate
        depends on EXPERIMENTAL
+       select FW_CONFIG
 
 config OMAP_REMOTEPROC
        tristate "OMAP remoteproc support"
+       depends on EXPERIMENTAL
        depends on ARCH_OMAP4
        depends on OMAP_IOMMU
        select REMOTEPROC
index 75506ec2840e2ec4d1e331f209377d874f604386..39d3aa41adda252c655b2f35f492f62f05c27ab3 100644 (file)
@@ -188,6 +188,26 @@ static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
                                        rpdev->id.name);
 }
 
+/**
+ * __ept_release() - deallocate an rpmsg endpoint
+ * @kref: the ept's reference count
+ *
+ * This function deallocates an ept, and is invoked when its @kref refcount
+ * drops to zero.
+ *
+ * Never invoke this function directly!
+ */
+static void __ept_release(struct kref *kref)
+{
+       struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+                                                 refcount);
+       /*
+        * At this point no one holds a reference to ept anymore,
+        * so we can directly free it
+        */
+       kfree(ept);
+}
+
 /* for more info, see below documentation of rpmsg_create_ept() */
 static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
@@ -206,6 +226,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                return NULL;
        }
 
+       kref_init(&ept->refcount);
+       mutex_init(&ept->cb_lock);
+
        ept->rpdev = rpdev;
        ept->cb = cb;
        ept->priv = priv;
@@ -238,7 +261,7 @@ rem_idr:
        idr_remove(&vrp->endpoints, request);
 free_ept:
        mutex_unlock(&vrp->endpoints_lock);
-       kfree(ept);
+       kref_put(&ept->refcount, __ept_release);
        return NULL;
 }
 
@@ -302,11 +325,17 @@ EXPORT_SYMBOL(rpmsg_create_ept);
 static void
 __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
 {
+       /* make sure new inbound messages can't find this ept anymore */
        mutex_lock(&vrp->endpoints_lock);
        idr_remove(&vrp->endpoints, ept->addr);
        mutex_unlock(&vrp->endpoints_lock);
 
-       kfree(ept);
+       /* make sure in-flight inbound messages won't invoke cb anymore */
+       mutex_lock(&ept->cb_lock);
+       ept->cb = NULL;
+       mutex_unlock(&ept->cb_lock);
+
+       kref_put(&ept->refcount, __ept_release);
 }
 
 /**
@@ -790,12 +819,28 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
 
        /* use the dst addr to fetch the callback of the appropriate user */
        mutex_lock(&vrp->endpoints_lock);
+
        ept = idr_find(&vrp->endpoints, msg->dst);
+
+       /* let's make sure no one deallocates ept while we use it */
+       if (ept)
+               kref_get(&ept->refcount);
+
        mutex_unlock(&vrp->endpoints_lock);
 
-       if (ept && ept->cb)
-               ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
-       else
+       if (ept) {
+               /* make sure ept->cb doesn't go away while we use it */
+               mutex_lock(&ept->cb_lock);
+
+               if (ept->cb)
+                       ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
+                               msg->src);
+
+               mutex_unlock(&ept->cb_lock);
+
+               /* farewell, ept, we don't need you anymore */
+               kref_put(&ept->refcount, __ept_release);
+       } else
                dev_warn(dev, "msg received with no recepient\n");
 
        /* publish the real size of the buffer */
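
The rpmsg changes above guard endpoints with a reference count plus a callback mutex: the receive path takes a reference and invokes the callback under ept->cb_lock, while teardown clears the callback under the same lock before dropping its own reference. A minimal, self-contained sketch of that pattern in kernel-style C, using hypothetical demo_ept names rather than the real rpmsg types:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical endpoint mirroring the rpmsg pattern above. */
struct demo_ept {
        struct kref refcount;
        struct mutex cb_lock;   /* serializes callback use vs. teardown */
        void (*cb)(void *priv, void *data, int len);
        void *priv;
};

static void demo_ept_release(struct kref *kref)
{
        struct demo_ept *ept = container_of(kref, struct demo_ept, refcount);

        kfree(ept);
}

static struct demo_ept *demo_ept_create(void (*cb)(void *, void *, int),
                                        void *priv)
{
        struct demo_ept *ept = kzalloc(sizeof(*ept), GFP_KERNEL);

        if (!ept)
                return NULL;

        kref_init(&ept->refcount);
        mutex_init(&ept->cb_lock);
        ept->cb = cb;
        ept->priv = priv;

        return ept;
}

/* Receive path: pin the endpoint, then invoke the callback under cb_lock. */
static void demo_ept_rx(struct demo_ept *ept, void *data, int len)
{
        kref_get(&ept->refcount);

        mutex_lock(&ept->cb_lock);
        if (ept->cb)
                ept->cb(ept->priv, data, len);
        mutex_unlock(&ept->cb_lock);

        kref_put(&ept->refcount, demo_ept_release);
}

/* Teardown: clear the callback under the lock, then drop the last ref. */
static void demo_ept_destroy(struct demo_ept *ept)
{
        mutex_lock(&ept->cb_lock);
        ept->cb = NULL;
        mutex_unlock(&ept->cb_lock);

        kref_put(&ept->refcount, demo_ept_release);
}
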
index 4bcf9ca2818ae740eadd016df48cc0461da23f6e..370889d0489bf9e4f196abcbbcd6f11b25e6f5a8 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 
 #define AB8500_RTC_SOFF_STAT_REG       0x00
 #define AB8500_RTC_CC_CONF_REG         0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
        }
 
        err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
-               IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+               IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
        if (err < 0) {
                rtc_device_unregister(rtc);
                return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
-
        err = ab8500_sysfs_rtc_register(&pdev->dev);
        if (err) {
                dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ab8500_rtc_match[] = {
+       { .compatible = "stericsson,ab8500-rtc", },
+       {}
+};
+
 static struct platform_driver ab8500_rtc_driver = {
        .driver = {
                .name = "ab8500-rtc",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_rtc_match,
        },
        .probe  = ab8500_rtc_probe,
        .remove = __devexit_p(ab8500_rtc_remove),
index 5e1d64ee52289b9e7a47990f8c7de0b41c7fccaf..e3e50d69baf85e75966bdea6d7f7b95ae6d65af0 100644 (file)
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
        struct platform_device *pdev = dev_id;
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        void __iomem *ioaddr = pdata->ioaddr;
+       unsigned long flags;
        u32 status;
        u32 events = 0;
 
-       spin_lock_irq(&pdata->rtc->irq_lock);
+       spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
        status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
        /* clear interrupt sources */
        writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
                events |= (RTC_PF | RTC_IRQF);
 
        rtc_update_irq(pdata->rtc, 1, events);
-       spin_unlock_irq(&pdata->rtc->irq_lock);
+       spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
 
        return IRQ_HANDLED;
 }
index 1f76320e545b1cf15d563cc8996b8eed7122f14a..e2785479113ca06648949d6c6a78adee92c78367 100644 (file)
@@ -458,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
        clk_disable(config->clk);
        clk_put(config->clk);
        iounmap(config->ioaddr);
-       kfree(config);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
                release_mem_region(res->start, resource_size(res));
        platform_set_drvdata(pdev, NULL);
        rtc_device_unregister(config->rtc);
+       kfree(config);
 
        return 0;
 }
index 258abeabf6246a1ed7e4db116713bb3128009fa0..c5d06fe83bba6274f2e6b944a43489395e04e01c 100644 (file)
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        }
 
        ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
-                                  IRQF_TRIGGER_RISING,
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                   dev_name(&rtc->dev), rtc);
        if (ret < 0) {
                dev_err(&pdev->dev, "IRQ is not free.\n");
index 532d212b6b2c7b633a9b4338da14c78439a1d3db..393e7ce8e95a15ef5907c820dcb2ff9bd3542ac8 100644 (file)
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
 
                if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
                        resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
-                       memcpy(&resp->ending_fis[0], r+16, 24);
+                       memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
                        ts->buf_valid_size = sizeof(*resp);
                }
        }
index 0c53c28dc3d379a5a09960e2e9ab8fae53cb8526..7e77cf62029190562d718fe7c365cb087c3e573e 100644 (file)
@@ -350,6 +350,7 @@ struct bnx2i_hba {
        struct pci_dev *pcidev;
        struct net_device *netdev;
        void __iomem *regview;
+       resource_size_t reg_base;
 
        u32 age;
        unsigned long cnic_dev_type;
index ece47e502282d389b91e5d170ca023688a22deb6..86a12b48e477e804e12765e7d535d7e70f44405e 100644 (file)
@@ -2724,7 +2724,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                goto arm_cq;
        }
 
-       reg_base = ep->hba->netdev->base_addr;
        if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
            (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
                config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2739,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                /* 5709 device in normal node and 5706/5708 devices */
                reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
 
-       ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+       ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
                                          MB_KERNEL_CTX_SIZE);
        if (!ep->qp.ctx_base)
                return -ENOMEM;
index f8d516b531617426cc05d626188075facf4e5a6a..621538b8b5445fd3e3e2685213ae7118f7229fd0 100644 (file)
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
        bnx2i_identify_device(hba);
        bnx2i_setup_host_queue_size(hba, shost);
 
+       hba->reg_base = pci_resource_start(hba->pcidev, 0);
        if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr,
-                                              BNX2_MQ_CONFIG2);
+               hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
                if (!hba->regview)
                        goto ioreg_map_err;
        } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+               hba->regview = pci_iomap(hba->pcidev, 0, 4096);
                if (!hba->regview)
                        goto ioreg_map_err;
        }
@@ -884,7 +884,7 @@ cid_que_err:
        bnx2i_free_mp_bdt(hba);
 mp_bdt_mem_err:
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
 ioreg_map_err:
@@ -910,7 +910,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
        pci_dev_put(hba->pcidev);
 
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
        bnx2i_free_mp_bdt(hba);
index 441d88ad99a7bb3abadb8b1e9af25281ced8334b..d109cc3a17b64126ee9ff38a5da320a070ffc2c5 100644 (file)
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
        if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
            ((stat->stat == SAM_STAT_CHECK_CONDITION &&
              dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
-               ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+               memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
 
                if (!link->sactive) {
-                       qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                } else {
-                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                        if (unlikely(link->eh_info.err_mask))
                                qc->flags |= ATA_QCFLAG_FAILED;
                }
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
                                qc->flags |= ATA_QCFLAG_FAILED;
                        }
 
-                       dev->sata_dev.tf.feature = 0x04; /* status err */
-                       dev->sata_dev.tf.command = ATA_ERR;
+                       dev->sata_dev.fis[3] = 0x04; /* status err */
+                       dev->sata_dev.fis[2] = ATA_ERR;
                }
        }
 
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
        struct domain_device *dev = qc->ap->private_data;
 
-       memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+       ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
        return true;
 }
 
index 6986552b47e6f6af591681fa4f14dbe3c0b11a6f..77759c78cc21167cb93c002f6f951efeb4cab0db 100644 (file)
@@ -3960,7 +3960,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
 {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;
-       int reason_code;
+       int login_code;
 
        ql_dbg(ql_dbg_tgt, vha, 0xe039,
            "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
@@ -4003,9 +4003,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
                    "qla_target(%d): Async LOOP_UP occured "
-                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                if (tgt->link_reinit_iocb_pending) {
                        qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
                            0, 0, 0, 0, 0, 0);
@@ -4020,23 +4020,24 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        case MBA_RSCN_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
                    "qla_target(%d): Async event %#x occured "
-                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;
 
        case MBA_PORT_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
                    "qla_target(%d): Port update async event %#x "
-                   "occured: updating the ports database (m[1]=%x, m[2]=%x, "
-                   "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
-               reason_code = le16_to_cpu(mailbox[2]);
-               if (reason_code == 0x4)
+                   "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
+                   "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+               login_code = le16_to_cpu(mailbox[2]);
+               if (login_code == 0x4)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
                            "Async MB 2: Got PLOGI Complete\n");
-               else if (reason_code == 0x7)
+               else if (login_code == 0x7)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
                            "Async MB 2: Port Logged Out\n");
                break;
@@ -4044,9 +4045,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        default:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
                    "qla_target(%d): Async event %#x occured: "
-                   "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
-                   code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;
        }
 
index cb99da920068986d2c2153f6af507781561074d5..87901fa74dd7e1acb136cbdf0607aa4b8eeeb652 100644 (file)
@@ -58,7 +58,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
        struct ft_tport *tport;
        int i;
 
-       tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+       tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+                                         lockdep_is_held(&ft_lport_lock));
        if (tport && tport->tpg)
                return tport;
 
index ced26c8ccd573eb8e6757a30681901b7a0ac88eb..0d2ea0c224c35c6012e0d6c1f7fe04ff337a8931 100644 (file)
@@ -401,7 +401,7 @@ out:
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
 {
        u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
        hvc_opal_privs[index] = &hvc_opal_boot_priv;
index 8fd398dffced7fe90afb5a6b13464279f3a7c5c6..ee469274a3fe0d03aed9259f5f4c380330742aa3 100644 (file)
@@ -500,6 +500,8 @@ retry:
                        goto retry;
                }
                if (!desc->reslength) { /* zero length read */
+                       dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+                       clear_bit(WDM_READ, &desc->flags);
                        spin_unlock_irq(&desc->iuspin);
                        goto retry;
                }
index 25a7422ee657b91021b6858615666b00a9ab19a2..8fb484984c86c6f6532d4ac8696cd5f6f4a7e5ff 100644 (file)
@@ -2324,12 +2324,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm);
 
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * A warm port reset is required to recover.
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
 {
        return hub_is_superspeed(hub->hdev) &&
-               (portstatus & USB_PORT_STAT_LINK_STATE) ==
-               USB_SS_PORT_LS_SS_INACTIVE;
+               (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_SS_INACTIVE) ||
+                ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_COMP_MOD));
 }
 
 static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2365,7 +2369,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                         *
                         * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
                         */
-                       if (hub_port_inactive(hub, portstatus)) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                int ret;
 
                                if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -4408,9 +4412,7 @@ static void hub_events(void)
                        /* Warm reset a USB3 protocol port if it's in
                         * SS.Inactive state.
                         */
-                       if (hub_is_superspeed(hub->hdev) &&
-                               (portstatus & USB_PORT_STAT_LINK_STATE)
-                                       == USB_SS_PORT_LS_SS_INACTIVE) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
                                hub_port_reset(hub, i, NULL,
                                                HUB_BH_RESET_TIME, true);
index 17cfb8a1131c254b411364d86a527d24677d15c1..c30435499a029de5ea40c24bfe6148feeadbebf5 100644 (file)
@@ -281,14 +281,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                }
        }
 
+       /* Hold PHYs in reset while initializing EHCI controller */
        if (pdata->phy_reset) {
                if (gpio_is_valid(pdata->reset_gpio_port[0]))
-                       gpio_request_one(pdata->reset_gpio_port[0],
-                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
 
                if (gpio_is_valid(pdata->reset_gpio_port[1]))
-                       gpio_request_one(pdata->reset_gpio_port[1],
-                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
 
                /* Hold the PHY in RESET for enough time till DIR is high */
                udelay(10);
@@ -330,6 +329,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
 
        ehci_reset(omap_ehci);
+       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+       if (ret) {
+               dev_err(dev, "failed to add hcd with err %d\n", ret);
+               goto err_add_hcd;
+       }
 
        if (pdata->phy_reset) {
                /* Hold the PHY in RESET for enough time till
@@ -344,12 +348,6 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                        gpio_set_value_cansleep(pdata->reset_gpio_port[1], 1);
        }
 
-       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
-       if (ret) {
-               dev_err(dev, "failed to add hcd with err %d\n", ret);
-               goto err_add_hcd;
-       }
-
        /* root ports should always stay powered */
        ehci_port_power(omap_ehci, 1);
 
index 2732ef660c5c08b85baeb38c23d5ec597bf15673..7b01094d7993957dba556c4231b781a524ad72e9 100644 (file)
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
        }
 }
 
+/* Updates Link Status for SuperSpeed ports */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+       u32 pls = status_reg & PORT_PLS_MASK;
+
+       /* resume state is an xHCI internal state.
+        * Do not report it to usb core.
+        */
+       if (pls == XDEV_RESUME)
+               return;
+
+       /* When the CAS bit is set, a warm reset
+        * should be performed on the port.
+        */
+       if (status_reg & PORT_CAS) {
+               /* The CAS bit can be set while the port is
+                * in any link state.
+                * Only roothubs have a CAS bit, so we
+                * pretend to be in compliance mode
+                * unless we're already in compliance
+                * or the inactive state.
+                */
+               if (pls != USB_SS_PORT_LS_COMP_MOD &&
+                   pls != USB_SS_PORT_LS_SS_INACTIVE) {
+                       pls = USB_SS_PORT_LS_COMP_MOD;
+               }
+               /* Also return the connection bit -
+                * the hub state machine resets the port
+                * when this bit is set.
+                */
+               pls |= USB_PORT_STAT_CONNECTION;
+       }
+       /* update status field */
+       *status |= pls;
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -606,13 +642,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        else
                                status |= USB_PORT_STAT_POWER;
                }
-               /* Port Link State */
+               /* Update Port Link State for SuperSpeed ports */
                if (hcd->speed == HCD_USB3) {
-                       /* resume state is a xHCI internal state.
-                        * Do not report it to usb core.
-                        */
-                       if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
-                               status |= (temp & PORT_PLS_MASK);
+                       xhci_hub_report_link_state(&status, temp);
                }
                if (bus_state->port_c_suspend & (1 << wIndex))
                        status |= 1 << USB_PORT_FEAT_C_SUSPEND;
index 23b4aefd103609bcf91b8341a6758a9fc4c3b618..8275645889da4ce779c08c88ac4ea06473f222e0 100644 (file)
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;
 
+       /* If we get two back-to-back stalls, and the first stalled transfer
+        * ends just before a link TRB, the dequeue pointer will be left on
+        * the link TRB by the code in the while loop.  So we have to update
+        * the dequeue pointer one segment further, or we'll jump off
+        * the segment into la-la-land.
+        */
+       if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+               ep_ring->deq_seg = ep_ring->deq_seg->next;
+               ep_ring->dequeue = ep_ring->deq_seg->trbs;
+       }
+
        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
index de3d6e3e57be4b12e840f5a168b439b151fe84fc..55c0785810c99fb5286e29d39cd4d7ff2b98addb 100644 (file)
@@ -341,7 +341,11 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - the xHC can set this bit to report a device attached
+ * during an Sx state. A warm port reset should be performed to clear this bit
+ * and move the port to the connected state.
+ */
+#define PORT_CAS       (1 << 24)
 /* wake on connect (enable) */
 #define PORT_WKCONN_E  (1 << 25)
 /* wake on disconnect (enable) */
index 81423f7361dbe8f770086c3a03f6aca09b4beb2c..d47eb06fe463b51463cb899659a5f2428d5bd24b 100644 (file)
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
        metro_priv->throttled = 0;
        spin_unlock_irqrestore(&metro_priv->lock, flags);
 
-       /*
-        * Force low_latency on so that our tty_push actually forces the data
-        * through, otherwise it is scheduled, and with high data rates (like
-        * with OHCI) data can get lost.
-        */
-       if (tty)
-               tty->low_latency = 1;
-
        /* Clear the urb pipe. */
        usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
 
index adf8ce72be50fb9fed94377fc54a7e22814a832f..417ab1b0aa30fe4088daf4017b5035b325bb0f3a 100644 (file)
@@ -497,6 +497,15 @@ static void option_instat_callback(struct urb *urb);
 
 /* MediaTek products */
 #define MEDIATEK_VENDOR_ID                     0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM               0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM               0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM               0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM             0x7101
+#define MEDIATEK_PRODUCT_7208_2COM             0x7102
+#define MEDIATEK_PRODUCT_FP_1COM               0x0003
+#define MEDIATEK_PRODUCT_FP_2COM               0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM             0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM             0x0033
 
 /* Cellient products */
 #define CELLIENT_VENDOR_ID                     0x2692
@@ -554,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
        .reserved = BIT(1),
 };
 
+static const struct option_blacklist_info net_intf2_blacklist = {
+       .reserved = BIT(2),
+};
+
 static const struct option_blacklist_info net_intf3_blacklist = {
        .reserved = BIT(3),
 };
@@ -1099,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
          0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1240,6 +1255,17 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) },        /* MediaTek MT6276M modem & app port */
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
        { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
        { } /* Terminating entry */
 };
index 5066eee10ccf78b641044e4541c022229617dbfc..58bd9c27369df9d4a39d1352f9d9cad74ec51b37 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
 
 #include <video/omapdss.h>
 
@@ -201,6 +202,28 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+       DSSDBG("pm notif %lu\n", v);
+
+       switch (v) {
+       case PM_SUSPEND_PREPARE:
+               DSSDBG("suspending displays\n");
+               return dss_suspend_all_devices();
+
+       case PM_POST_SUSPEND:
+               DSSDBG("resuming displays\n");
+               return dss_resume_all_devices();
+
+       default:
+               return 0;
+       }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+       .notifier_call = omap_dss_pm_notif,
+};
+
 static int __init omap_dss_probe(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -224,6 +247,8 @@ static int __init omap_dss_probe(struct platform_device *pdev)
        else if (pdata->default_device)
                core.default_display_name = pdata->default_device->name;
 
+       register_pm_notifier(&omap_dss_pm_notif_block);
+
        return 0;
 
 err_debugfs:
@@ -233,6 +258,8 @@ err_debugfs:
 
 static int omap_dss_remove(struct platform_device *pdev)
 {
+       unregister_pm_notifier(&omap_dss_pm_notif_block);
+
        dss_uninitialize_debugfs();
 
        dss_uninit_overlays(pdev);
@@ -247,25 +274,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
        dss_disable_all_devices();
 }
 
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       DSSDBG("suspend %d\n", state.event);
-
-       return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
-       DSSDBG("resume\n");
-
-       return dss_resume_all_devices();
-}
-
 static struct platform_driver omap_dss_driver = {
        .remove         = omap_dss_remove,
        .shutdown       = omap_dss_shutdown,
-       .suspend        = omap_dss_suspend,
-       .resume         = omap_dss_resume,
        .driver         = {
                .name   = "omapdss",
                .owner  = THIS_MODULE,
index 4749ac356469ea8645b3e92c1d72512a102f8e36..397d4eee11bb7715d9d01e3b8a1330abcdc2f2e4 100644 (file)
@@ -384,7 +384,7 @@ void dispc_runtime_put(void)
        DSSDBG("dispc_runtime_put\n");
 
        r = pm_runtime_put_sync(&dispc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
index ca8382d346e9bb0af2d8b749efd0e6d6ae25ff09..14ce8cc079e3d1840e982734b4e2644770ba9b72 100644 (file)
@@ -1075,7 +1075,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
        DSSDBG("dsi_runtime_put\n");
 
        r = pm_runtime_put_sync(&dsi->pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 /* source clock for DSI PLL. this could also be PCLKFREE */
index 770632359a176b74bd2b62f78bc83a3ac966cbd0..d2b57197b292086cd95a32ef1b33136c0e216166 100644 (file)
@@ -731,7 +731,7 @@ static void dss_runtime_put(void)
        DSSDBG("dss_runtime_put\n");
 
        r = pm_runtime_put_sync(&dss.pdev->dev);
-       WARN_ON(r < 0 && r != -EBUSY);
+       WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
 }
 
 /* DEBUGFS */
index 8195c7166d200c8e575aba54f5e4f46c335df002..26a2430a70288d6cf468357d854fe3429b245f72 100644 (file)
@@ -138,7 +138,7 @@ static void hdmi_runtime_put(void)
        DSSDBG("hdmi_runtime_put\n");
 
        r = pm_runtime_put_sync(&hdmi.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static int __init hdmi_init_display(struct omap_dss_device *dssdev)
index 3d8c206e90e5d93631a0dc493f9ab180fbfa181e..7985fa12b9b46c8c3f6da328f578a76b6267a66e 100644 (file)
@@ -141,7 +141,7 @@ static void rfbi_runtime_put(void)
        DSSDBG("rfbi_runtime_put\n");
 
        r = pm_runtime_put_sync(&rfbi.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 void rfbi_bus_lock(void)
index 2b8973931ff48e845cd9a1386b7b437b369b24e4..3907c8b6ecbca991e3cc9313c1a1473c247cb216 100644 (file)
@@ -402,7 +402,7 @@ static void venc_runtime_put(void)
        DSSDBG("venc_runtime_put\n");
 
        r = pm_runtime_put_sync(&venc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static const struct venc_config *venc_timings_to_config(
index bfbc15ca38ddd3a0ac57cf0860624558a35aee47..0908e604433303d605b9a7cd6ab4ffe36087f96f 100644 (file)
@@ -47,7 +47,7 @@ struct virtio_balloon
        struct task_struct *thread;
 
        /* Waiting for host to ack the pages we released. */
-       struct completion acked;
+       wait_queue_head_t acked;
 
        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
@@ -89,29 +89,25 @@ static struct page *balloon_pfn_to_page(u32 pfn)
 
 static void balloon_ack(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (vb)
-               complete(&vb->acked);
+       wake_up(&vb->acked);
 }
 
 static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 {
        struct scatterlist sg;
+       unsigned int len;
 
        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
 
-       init_completion(&vb->acked);
-
        /* We should always be able to add one buffer to an empty queue. */
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
        virtqueue_kick(vq);
 
        /* When host has read buffer, this completes via balloon_ack */
-       wait_for_completion(&vb->acked);
+       wait_event(vb->acked, virtqueue_get_buf(vq, &len));
 }
 
 static void set_page_pfns(u32 pfns[], struct page *page)
@@ -231,12 +227,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
  */
 static void stats_request(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (!vb)
-               return;
        vb->need_stats_update = 1;
        wake_up(&vb->config_change);
 }
@@ -245,11 +237,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
 {
        struct virtqueue *vq;
        struct scatterlist sg;
+       unsigned int len;
 
        vb->need_stats_update = 0;
        update_balloon_stats(vb);
 
        vq = vb->stats_vq;
+       if (!virtqueue_get_buf(vq, &len))
+               return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats));
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
@@ -358,6 +353,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
        INIT_LIST_HEAD(&vb->pages);
        vb->num_pages = 0;
        init_waitqueue_head(&vb->config_change);
+       init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;
        vb->need_stats_update = 0;
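
The virtio_balloon hunks above replace the completion with a wait queue: balloon_ack() now only wakes vb->acked, and tell_host() sleeps with wait_event() using virtqueue_get_buf() as the condition. A minimal sketch of the underlying wake_up()/wait_event() shape, with hypothetical demo_* names and a plain flag standing in for the virtqueue poll:

#include <linux/wait.h>

/* Hypothetical device state mirroring the ack handshake above. */
struct demo_dev {
        wait_queue_head_t acked;
        bool ack_seen;          /* written from the callback path */
};

static void demo_init(struct demo_dev *d)
{
        init_waitqueue_head(&d->acked);
        d->ack_seen = false;
}

/* Callback context: do no real work here, just note the event and wake up. */
static void demo_ack_callback(struct demo_dev *d)
{
        d->ack_seen = true;
        wake_up(&d->acked);
}

/* Process context: sleep until the callback has signalled the condition. */
static void demo_wait_for_ack(struct demo_dev *d)
{
        wait_event(d->acked, d->ack_seen);
        d->ack_seen = false;
}
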
 
index 7301cdb4b2cb3cf6c5d8626b2275a85f88d4f55f..a383c18e74e86eebaa847d756e3493e7ca3c9bfd 100644 (file)
@@ -301,10 +301,14 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
 
        eb = path->nodes[level];
-       if (!eb) {
-               WARN_ON(1);
-               ret = 1;
-               goto out;
+       while (!eb) {
+               if (!level) {
+                       WARN_ON(1);
+                       ret = 1;
+                       goto out;
+               }
+               level--;
+               eb = path->nodes[level];
        }
 
        ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
@@ -835,6 +839,7 @@ again:
                        }
                        ret = __add_delayed_refs(head, delayed_ref_seq,
                                                 &prefs_delayed);
+                       mutex_unlock(&head->mutex);
                        if (ret) {
                                spin_unlock(&delayed_refs->lock);
                                goto out;
@@ -928,8 +933,6 @@ again:
        }
 
 out:
-       if (head)
-               mutex_unlock(&head->mutex);
        btrfs_free_path(path);
        while (!list_empty(&prefs)) {
                ref = list_first_entry(&prefs, struct __prelim_ref, list);
index 15cbc2bf4ff0ed11f8bb2e163bc1960c65b27fa7..8206b3900587efa23570b5b8f7f8071ab6e73156 100644 (file)
@@ -1024,11 +1024,18 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
                if (!looped && !tm)
                        return 0;
                /*
-                * we must have key remove operations in the log before the
-                * replace operation.
+                * if there are no tree operations for the oldest root, we simply
+                * return it. this should only happen if that (old) root is at
+                * level 0.
                 */
-               BUG_ON(!tm);
+               if (!tm)
+                       break;
 
+               /*
+                * if there's an operation that's not a root replacement, we
+                * found the oldest version of our root. normally, we'll find a
+                * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
+                */
                if (tm->op != MOD_LOG_ROOT_REPLACE)
                        break;
 
@@ -1087,11 +1094,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
                                                      tm->generation);
                        break;
                case MOD_LOG_KEY_ADD:
-                       if (tm->slot != n - 1) {
-                               o_dst = btrfs_node_key_ptr_offset(tm->slot);
-                               o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
-                               memmove_extent_buffer(eb, o_dst, o_src, p_size);
-                       }
+                       /* if a move operation is needed it's in the log */
                        n--;
                        break;
                case MOD_LOG_MOVE_KEYS:
@@ -1192,16 +1195,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        }
 
        tm = tree_mod_log_search(root->fs_info, logical, time_seq);
-       /*
-        * there was an item in the log when __tree_mod_log_oldest_root
-        * returned. this one must not go away, because the time_seq passed to
-        * us must be blocking its removal.
-        */
-       BUG_ON(!tm);
-
        if (old_root)
-               eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
-                                              root->nodesize);
+               eb = alloc_dummy_extent_buffer(logical, root->nodesize);
        else
                eb = btrfs_clone_extent_buffer(root->node);
        btrfs_tree_read_unlock(root->node);
@@ -1216,7 +1211,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                btrfs_set_header_level(eb, old_root->level);
                btrfs_set_header_generation(eb, old_generation);
        }
-       __tree_mod_log_rewind(eb, time_seq, tm);
+       if (tm)
+               __tree_mod_log_rewind(eb, time_seq, tm);
+       else
+               WARN_ON(btrfs_header_level(eb) != 0);
        extent_buffer_get(eb);
 
        return eb;
@@ -2995,7 +2993,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 static void insert_ptr(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *path,
                       struct btrfs_disk_key *key, u64 bytenr,
-                      int slot, int level, int tree_mod_log)
+                      int slot, int level)
 {
        struct extent_buffer *lower;
        int nritems;
@@ -3008,7 +3006,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
        BUG_ON(slot > nritems);
        BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
        if (slot != nritems) {
-               if (tree_mod_log && level)
+               if (level)
                        tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
                                             slot, nritems - slot);
                memmove_extent_buffer(lower,
@@ -3016,7 +3014,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
-       if (tree_mod_log && level) {
+       if (level) {
                ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
                                              MOD_LOG_KEY_ADD);
                BUG_ON(ret < 0);
@@ -3104,7 +3102,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(split);
 
        insert_ptr(trans, root, path, &disk_key, split->start,
-                  path->slots[level + 1] + 1, level + 1, 1);
+                  path->slots[level + 1] + 1, level + 1);
 
        if (path->slots[level] >= mid) {
                path->slots[level] -= mid;
@@ -3641,7 +3639,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
        btrfs_set_header_nritems(l, mid);
        btrfs_item_key(right, &disk_key, 0);
        insert_ptr(trans, root, path, &disk_key, right->start,
-                  path->slots[1] + 1, 1, 0);
+                  path->slots[1] + 1, 1);
 
        btrfs_mark_buffer_dirty(right);
        btrfs_mark_buffer_dirty(l);
@@ -3848,7 +3846,7 @@ again:
                if (mid <= slot) {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                  path->slots[1] + 1, 1, 0);
+                                  path->slots[1] + 1, 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -3857,7 +3855,7 @@ again:
                } else {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                         path->slots[1], 1, 0);
+                                         path->slots[1], 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -5121,6 +5119,18 @@ again:
 
                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
+                       if (!ret && time_seq) {
+                               /*
+                                * If we don't get the lock, we may be racing
+                                * with push_leaf_left, which holds that lock
+                                * while itself waiting for the leaf we've
+                                * currently locked. To solve this, we give up
+                                * our lock and cycle.
+                                */
+                               btrfs_release_path(path);
+                               cond_resched();
+                               goto again;
+                       }
                        if (!ret) {
                                btrfs_set_path_blocking(path);
                                btrfs_tree_read_lock(next);
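
The comment in the hunk above describes a lock-ordering hazard: the reader already holds one tree lock and only try-locks the next one; on contention it drops everything and retries rather than blocking against push_leaf_left. Below is a minimal userspace sketch of the same trylock-and-back-off pattern using pthread mutexes; the thread and lock names are illustrative only, not btrfs code.

/* Build with: gcc -pthread trylock_backoff.c */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t leaf = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t next_leaf = PTHREAD_MUTEX_INITIALIZER;

static void *reader(void *arg)
{
        (void)arg;
        for (;;) {
                pthread_mutex_lock(&leaf);
                if (pthread_mutex_trylock(&next_leaf) == 0)
                        break;          /* got both locks, proceed */
                /* Contended: release everything and cycle, analogous to
                 * btrfs_release_path() + cond_resched() + goto again. */
                pthread_mutex_unlock(&leaf);
                sched_yield();
        }
        puts("reader: took leaf then next_leaf without deadlocking");
        pthread_mutex_unlock(&next_leaf);
        pthread_mutex_unlock(&leaf);
        return NULL;
}

static void *pusher(void *arg)
{
        (void)arg;
        /* Takes the locks in the opposite order, like push_leaf_left. */
        pthread_mutex_lock(&next_leaf);
        pthread_mutex_lock(&leaf);
        puts("pusher: took next_leaf then leaf");
        pthread_mutex_unlock(&leaf);
        pthread_mutex_unlock(&next_leaf);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, pusher, NULL);
        pthread_create(&b, NULL, reader, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

Because the reader never blocks on the second lock while holding the first, the circular wait that defines deadlock cannot form.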
index 7b845ff4af99fb643bd959e52dd6f202b7dee7ef..2936ca49b3b4af3a799f7585b74038d289ab8278 100644 (file)
@@ -2354,12 +2354,17 @@ retry_root_backup:
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
                goto recovery_tree_root;
-
        csum_root->track_dirty = 1;
 
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
 
+       ret = btrfs_recover_balance(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to recover balance\n");
+               goto fail_block_groups;
+       }
+
        ret = btrfs_init_dev_stats(fs_info);
        if (ret) {
                printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
@@ -2485,20 +2490,23 @@ retry_root_backup:
                goto fail_trans_kthread;
        }
 
-       if (!(sb->s_flags & MS_RDONLY)) {
-               down_read(&fs_info->cleanup_work_sem);
-               err = btrfs_orphan_cleanup(fs_info->fs_root);
-               if (!err)
-                       err = btrfs_orphan_cleanup(fs_info->tree_root);
-               up_read(&fs_info->cleanup_work_sem);
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
 
-               if (!err)
-                       err = btrfs_recover_balance(fs_info->tree_root);
+       down_read(&fs_info->cleanup_work_sem);
+       if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+           (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
+               up_read(&fs_info->cleanup_work_sem);
+               close_ctree(tree_root);
+               return ret;
+       }
+       up_read(&fs_info->cleanup_work_sem);
 
-               if (err) {
-                       close_ctree(tree_root);
-                       return err;
-               }
+       ret = btrfs_resume_balance_async(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to resume balance\n");
+               close_ctree(tree_root);
+               return ret;
        }
 
        return 0;
index 4b5a1e1bdefbe095c239b464e55c9699e865175b..6e1d36702ff71c4fc5bde2733cbf166376b726d3 100644 (file)
@@ -2347,12 +2347,10 @@ next:
        return count;
 }
 
-
 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
-                       unsigned long num_refs)
+                              unsigned long num_refs,
+                              struct list_head *first_seq)
 {
-       struct list_head *first_seq = delayed_refs->seq_head.next;
-
        spin_unlock(&delayed_refs->lock);
        pr_debug("waiting for more refs (num %ld, first %p)\n",
                 num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
+       struct list_head *first_seq = NULL;
        int ret;
        u64 delayed_start;
        int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
                                 */
                                consider_waiting = 1;
                                num_refs = delayed_refs->num_entries;
+                               first_seq = root->fs_info->tree_mod_seq_list.next;
                        } else {
-                               wait_for_more_refs(delayed_refs, num_refs);
+                               wait_for_more_refs(delayed_refs,
+                                                  num_refs, first_seq);
                                /*
                                 * after waiting, things have changed. we
                                 * dropped the lock and someone else might have
index aaa12c1eb3483a8371df48e5765b986c436451c4..01c21b6c6d43e44a28a4c862ca6532d6f8b02654 100644 (file)
@@ -3324,6 +3324,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                             writepage_t writepage, void *data,
                             void (*flush_fn)(void *))
 {
+       struct inode *inode = mapping->host;
        int ret = 0;
        int done = 0;
        int nr_to_write_done = 0;
@@ -3334,6 +3335,18 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
        int scanned = 0;
        int tag;
 
+       /*
+        * We have to hold onto the inode so that ordered extents can do their
+        * work when the IO finishes.  The alternative to this is failing to add
+        * an ordered extent if the igrab() fails there and that is a huge pain
+        * to deal with, so instead just hold onto the inode throughout the
+        * writepages operation.  If it fails here we are freeing up the inode
+        * anyway and we'd rather not waste our time writing out stuff that is
+        * going to be truncated anyway.
+        */
+       if (!igrab(inode))
+               return 0;
+
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
@@ -3428,6 +3441,7 @@ retry:
                index = 0;
                goto retry;
        }
+       btrfs_add_delayed_iput(inode);
        return ret;
 }
 
index 70dc8ca73e257bc3a1e7a96ea48009bff093af9a..9aa01ec2138d466f3d1726ccd6291d054c5e5c98 100644 (file)
@@ -1334,7 +1334,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                    loff_t *ppos, size_t count, size_t ocount)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = fdentry(file)->d_inode;
        struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
@@ -1344,18 +1343,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
        written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
                                            count, ocount);
 
-       /*
-        * the generic O_DIRECT will update in-memory i_size after the
-        * DIOs are done.  But our endio handlers that update the on
-        * disk i_size never update past the in memory i_size.  So we
-        * need one more update here to catch any additions to the
-        * file
-        */
-       if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
-               btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
-               mark_inode_dirty(inode);
-       }
-
        if (written < 0 || written == count)
                return written;
 
index 81296c57405a5d53a27dba626a4d6201829bd578..6c4e2baa9290e16d06a4a8dfc5c3b05880cfeb27 100644 (file)
@@ -1543,29 +1543,26 @@ again:
        end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
        /*
-        * XXX - this can go away after a few releases.
-        *
-        * since the only user of btrfs_remove_free_space is the tree logging
-        * stuff, and the only way to test that is under crash conditions, we
-        * want to have this debug stuff here just in case somethings not
-        * working.  Search the bitmap for the space we are trying to use to
-        * make sure its actually there.  If its not there then we need to stop
-        * because something has gone wrong.
+        * We need to search for bits in this bitmap.  We could only cover some
+        * of the extent in this bitmap thanks to how we add space, so we need
+        * to search for as much of it as we can and clear that amount, and then
+        * go searching for the next bit.
         */
        search_start = *offset;
-       search_bytes = *bytes;
+       search_bytes = ctl->unit;
        search_bytes = min(search_bytes, end - search_start + 1);
        ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
 
-       if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
-               *bytes -= end - *offset + 1;
-               *offset = end + 1;
-       } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
-               *bytes = 0;
-       }
+       /* We may have found more bits than what we need */
+       search_bytes = min(search_bytes, *bytes);
+
+       /* Cannot clear past the end of the bitmap */
+       search_bytes = min(search_bytes, end - search_start + 1);
+
+       bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+       *offset += search_bytes;
+       *bytes -= search_bytes;
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
@@ -1596,7 +1593,7 @@ again:
                 * everything over again.
                 */
                search_start = *offset;
-               search_bytes = *bytes;
+               search_bytes = ctl->unit;
                ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
@@ -1879,12 +1876,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       if (!bytes)
+               goto out_lock;
+
        info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
@@ -1905,88 +1904,48 @@ again:
                }
        }
 
-       if (info->bytes < bytes && rb_next(&info->offset_index)) {
-               u64 end;
-               next_info = rb_entry(rb_next(&info->offset_index),
-                                            struct btrfs_free_space,
-                                            offset_index);
-
-               if (next_info->bitmap)
-                       end = next_info->offset +
-                             BITS_PER_BITMAP * ctl->unit - 1;
-               else
-                       end = next_info->offset + next_info->bytes;
-
-               if (next_info->bytes < bytes ||
-                   next_info->offset > offset || offset > end) {
-                       printk(KERN_CRIT "Found free space at %llu, size %llu,"
-                             " trying to use %llu\n",
-                             (unsigned long long)info->offset,
-                             (unsigned long long)info->bytes,
-                             (unsigned long long)bytes);
-                       WARN_ON(1);
-                       ret = -EINVAL;
-                       goto out_lock;
-               }
-
-               info = next_info;
-       }
-
-       if (info->bytes == bytes) {
+       if (!info->bitmap) {
                unlink_free_space(ctl, info);
-               if (info->bitmap) {
-                       kfree(info->bitmap);
-                       ctl->total_bitmaps--;
-               }
-               kmem_cache_free(btrfs_free_space_cachep, info);
-               ret = 0;
-               goto out_lock;
-       }
-
-       if (!info->bitmap && info->offset == offset) {
-               unlink_free_space(ctl, info);
-               info->offset += bytes;
-               info->bytes -= bytes;
-               ret = link_free_space(ctl, info);
-               WARN_ON(ret);
-               goto out_lock;
-       }
+               if (offset == info->offset) {
+                       u64 to_free = min(bytes, info->bytes);
+
+                       info->bytes -= to_free;
+                       info->offset += to_free;
+                       if (info->bytes) {
+                               ret = link_free_space(ctl, info);
+                               WARN_ON(ret);
+                       } else {
+                               kmem_cache_free(btrfs_free_space_cachep, info);
+                       }
 
-       if (!info->bitmap && info->offset <= offset &&
-           info->offset + info->bytes >= offset + bytes) {
-               u64 old_start = info->offset;
-               /*
-                * we're freeing space in the middle of the info,
-                * this can happen during tree log replay
-                *
-                * first unlink the old info and then
-                * insert it again after the hole we're creating
-                */
-               unlink_free_space(ctl, info);
-               if (offset + bytes < info->offset + info->bytes) {
-                       u64 old_end = info->offset + info->bytes;
+                       offset += to_free;
+                       bytes -= to_free;
+                       goto again;
+               } else {
+                       u64 old_end = info->bytes + info->offset;
 
-                       info->offset = offset + bytes;
-                       info->bytes = old_end - info->offset;
+                       info->bytes = offset - info->offset;
                        ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;
-               } else {
-                       /* the hole we're creating ends at the end
-                        * of the info struct, just free the info
-                        */
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               }
-               spin_unlock(&ctl->tree_lock);
 
-               /* step two, insert a new info struct to cover
-                * anything before the hole
-                */
-               ret = btrfs_add_free_space(block_group, old_start,
-                                          offset - old_start);
-               WARN_ON(ret); /* -ENOMEM */
-               goto out;
+                       /* Not enough bytes in this entry to satisfy us */
+                       if (old_end < offset + bytes) {
+                               bytes -= old_end - offset;
+                               offset = old_end;
+                               goto again;
+                       } else if (old_end == offset + bytes) {
+                               /* all done */
+                               goto out_lock;
+                       }
+                       spin_unlock(&ctl->tree_lock);
+
+                       ret = btrfs_add_free_space(block_group, offset + bytes,
+                                                  old_end - (offset + bytes));
+                       WARN_ON(ret);
+                       goto out;
+               }
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
index d8bb0dbc4941ca3f288119134df64eeb1eb679da..a7d1921ac76b8ee5d85d563c1ceed80f4a65a2dd 100644 (file)
@@ -3754,7 +3754,7 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        if (root->fs_info->log_root_recovering) {
-               BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+               BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                                 &BTRFS_I(inode)->runtime_flags));
                goto no_delete;
        }
@@ -5876,8 +5876,17 @@ map:
        bh_result->b_size = len;
        bh_result->b_bdev = em->bdev;
        set_buffer_mapped(bh_result);
-       if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
-               set_buffer_new(bh_result);
+       if (create) {
+               if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+                       set_buffer_new(bh_result);
+
+               /*
+                * Need to update the i_size under the extent lock so buffered
+                * readers will get the updated i_size when we unlock.
+                */
+               if (start + len > i_size_read(inode))
+                       i_size_write(inode, start + len);
+       }
 
        free_extent_map(em);
 
@@ -6360,12 +6369,48 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                 */
                ordered = btrfs_lookup_ordered_range(inode, lockstart,
                                                     lockend - lockstart + 1);
-               if (!ordered)
+
+               /*
+                * We need to make sure there are no buffered pages in this
+                * range either, we could have raced between the invalidate in
+                * generic_file_direct_write and locking the extent.  The
+                * invalidate needs to happen so that reads after a write do not
+                * get stale data.
+                */
+               if (!ordered && (!writing ||
+                   !test_range_bit(&BTRFS_I(inode)->io_tree,
+                                   lockstart, lockend, EXTENT_UPTODATE, 0,
+                                   cached_state)))
                        break;
+
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                                     &cached_state, GFP_NOFS);
-               btrfs_start_ordered_extent(inode, ordered, 1);
-               btrfs_put_ordered_extent(ordered);
+
+               if (ordered) {
+                       btrfs_start_ordered_extent(inode, ordered, 1);
+                       btrfs_put_ordered_extent(ordered);
+               } else {
+                       /* Screw you mmap */
+                       ret = filemap_write_and_wait_range(file->f_mapping,
+                                                          lockstart,
+                                                          lockend);
+                       if (ret)
+                               goto out;
+
+                       /*
+                        * If we found a page that couldn't be invalidated just
+                        * fall back to buffered.
+                        */
+                       ret = invalidate_inode_pages2_range(file->f_mapping,
+                                       lockstart >> PAGE_CACHE_SHIFT,
+                                       lockend >> PAGE_CACHE_SHIFT);
+                       if (ret) {
+                               if (ret == -EBUSY)
+                                       ret = 0;
+                               goto out;
+                       }
+               }
+
                cond_resched();
        }
 
index 497c530724cf6b7a50296d2c6660fef7f4066cb9..e440aa653c30d6f6c8ad1e7437bfa6e2b4d50799 100644 (file)
@@ -339,7 +339,7 @@ struct btrfs_ioctl_get_dev_stats {
 #define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
 #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
                                   struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
 #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
 #define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
                              struct btrfs_ioctl_scrub_args)
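
The one-liner above corrects the direction encoding of BTRFS_IOC_SUBVOL_GETFLAGS: a GETFLAGS ioctl copies data out to userspace, which is what _IOR declares, whereas _IOW claims userspace passes data in. A small standalone sketch (the DEMO_* names are made up for illustration) showing that the two macros produce different command numbers:

/* Build on Linux with: gcc -o ioctl_dir ioctl_dir.c */
#include <stdio.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define DEMO_MAGIC 0x94                         /* arbitrary magic byte for the demo */
#define DEMO_GETFLAGS_AS_IOW _IOW(DEMO_MAGIC, 25, __u64)
#define DEMO_GETFLAGS_AS_IOR _IOR(DEMO_MAGIC, 25, __u64)

int main(void)
{
        /* _IOC_DIR() extracts the direction bits: _IOC_WRITE for _IOW
         * (userspace supplies data), _IOC_READ for _IOR (kernel fills it in). */
        printf("_IOW: cmd=0x%08x dir=%u\n",
               (unsigned int)DEMO_GETFLAGS_AS_IOW,
               (unsigned int)_IOC_DIR(DEMO_GETFLAGS_AS_IOW));
        printf("_IOR: cmd=0x%08x dir=%u\n",
               (unsigned int)DEMO_GETFLAGS_AS_IOR,
               (unsigned int)_IOC_DIR(DEMO_GETFLAGS_AS_IOR));
        return 0;
}

Since the direction field is part of the command number itself, tools that validate or log ioctls by decoding it would otherwise misreport this call as a write.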
index 0eb9a4da069e6cb1e3c4294d4ad02e2ab92da666..e23991574fdf309e6ed95e23985fc5dd0af28e0f 100644 (file)
@@ -1187,6 +1187,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
+               ret = btrfs_resume_balance_async(fs_info);
+               if (ret)
+                       goto restore;
+
                sb->s_flags &= ~MS_RDONLY;
        }
 
index 2017d0ff511ca3304dad46e85045ad2ab28d4e75..8abeae4224f92dbd870877b76224515c304e979d 100644 (file)
@@ -690,6 +690,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        kfree(name);
 
        iput(inode);
+
+       btrfs_run_delayed_items(trans, root);
        return ret;
 }
 
@@ -895,6 +897,7 @@ again:
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
+                               btrfs_run_delayed_items(trans, root);
                        }
                        kfree(victim_name);
                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1475,6 +1478,9 @@ again:
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        BUG_ON(ret);
+
+                       btrfs_run_delayed_items(trans, root);
+
                        kfree(name);
                        iput(inode);
 
index 8a3d2594b80726b3a4da08c5924cb51b8e28a0ba..ecaad40e7ef499fedb7667659d9efd955a661eaa 100644 (file)
@@ -2845,31 +2845,48 @@ out:
 
 static int balance_kthread(void *data)
 {
-       struct btrfs_balance_control *bctl =
-                       (struct btrfs_balance_control *)data;
-       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
-       set_balance_control(bctl);
-
-       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
-               printk(KERN_INFO "btrfs: force skipping balance\n");
-       } else {
+       if (fs_info->balance_ctl) {
                printk(KERN_INFO "btrfs: continuing balance\n");
-               ret = btrfs_balance(bctl, NULL);
+               ret = btrfs_balance(fs_info->balance_ctl, NULL);
        }
 
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+
        return ret;
 }
 
-int btrfs_recover_balance(struct btrfs_root *tree_root)
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
 {
        struct task_struct *tsk;
+
+       spin_lock(&fs_info->balance_lock);
+       if (!fs_info->balance_ctl) {
+               spin_unlock(&fs_info->balance_lock);
+               return 0;
+       }
+       spin_unlock(&fs_info->balance_lock);
+
+       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+               printk(KERN_INFO "btrfs: force skipping balance\n");
+               return 0;
+       }
+
+       tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+       if (IS_ERR(tsk))
+               return PTR_ERR(tsk);
+
+       return 0;
+}
+
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+{
        struct btrfs_balance_control *bctl;
        struct btrfs_balance_item *item;
        struct btrfs_disk_balance_args disk_bargs;
@@ -2882,29 +2899,30 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        if (!path)
                return -ENOMEM;
 
-       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
-       if (!bctl) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        key.objectid = BTRFS_BALANCE_OBJECTID;
        key.type = BTRFS_BALANCE_ITEM_KEY;
        key.offset = 0;
 
-       ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        if (ret < 0)
-               goto out_bctl;
+               goto out;
        if (ret > 0) { /* ret = -ENOENT; */
                ret = 0;
-               goto out_bctl;
+               goto out;
+       }
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out;
        }
 
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
 
-       bctl->fs_info = tree_root->fs_info;
-       bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+       bctl->fs_info = fs_info;
+       bctl->flags = btrfs_balance_flags(leaf, item);
+       bctl->flags |= BTRFS_BALANCE_RESUME;
 
        btrfs_balance_data(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
@@ -2913,14 +2931,13 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        btrfs_balance_sys(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
-       tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
-       if (IS_ERR(tsk))
-               ret = PTR_ERR(tsk);
-       else
-               goto out;
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
 
-out_bctl:
-       kfree(bctl);
+       set_balance_control(bctl);
+
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
 out:
        btrfs_free_path(path);
        return ret;
@@ -4061,16 +4078,18 @@ static void btrfs_end_bio(struct bio *bio, int err)
 
                        BUG_ON(stripe_index >= bbio->num_stripes);
                        dev = bbio->stripes[stripe_index].dev;
-                       if (bio->bi_rw & WRITE)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_WRITE_ERRS);
-                       else
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_READ_ERRS);
-                       if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_FLUSH_ERRS);
-                       btrfs_dev_stat_print_on_error(dev);
+                       if (dev->bdev) {
+                               if (bio->bi_rw & WRITE)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_WRITE_ERRS);
+                               else
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_READ_ERRS);
+                               if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_FLUSH_ERRS);
+                               btrfs_dev_stat_print_on_error(dev);
+                       }
                }
        }
 
index 74366f27a76bbc7272e4b41f2f92bf9520bca813..95f6637614db5932f8d52daba79943514a7ed8da 100644 (file)
@@ -281,7 +281,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_balance_control *bctl,
                  struct btrfs_ioctl_balance_args *bargs);
-int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
index 838a9cf246bd0fa561ab66295f9bb3df77e0c6a2..c7062c896d7c20489f41ea838640a5cfa5387ca9 100644 (file)
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+       int ret;
+       struct buffer_head *bh;
+
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
                return NULL;
        }
 
-       for (;;) {
-               struct buffer_head * bh;
-               int ret;
+retry:
+       bh = __find_get_block(bdev, block, size);
+       if (bh)
+               return bh;
 
+       ret = grow_buffers(bdev, block, size);
+       if (ret == 0) {
+               free_more_memory();
+               goto retry;
+       } else if (ret > 0) {
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
-
-               ret = grow_buffers(bdev, block, size);
-               if (ret < 0)
-                       return NULL;
-               if (ret == 0)
-                       free_more_memory();
        }
+       return NULL;
 }
 
 /*
index 78db68a5cf44c83ddc8f60cfa89e4796d8fa0ffc..0ae86ddf22138180383542548b3fb7e003c94453 100644 (file)
@@ -1653,24 +1653,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                         * If yes, we have encountered a double deliminator
                         * reset the NULL character to the deliminator
                         */
-                       if (tmp_end < end && tmp_end[1] == delim)
+                       if (tmp_end < end && tmp_end[1] == delim) {
                                tmp_end[0] = delim;
 
-                       /* Keep iterating until we get to a single deliminator
-                        * OR the end
-                        */
-                       while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
-                              (tmp_end[1] == delim)) {
-                               tmp_end = (char *) &tmp_end[2];
-                       }
+                               /* Keep iterating until we get to a single
+                                * deliminator OR the end
+                                */
+                               while ((tmp_end = strchr(tmp_end, delim))
+                                       != NULL && (tmp_end[1] == delim)) {
+                                               tmp_end = (char *) &tmp_end[2];
+                               }
 
-                       /* Reset var options to point to next element */
-                       if (tmp_end) {
-                               tmp_end[0] = '\0';
-                               options = (char *) &tmp_end[1];
-                       } else
-                               /* Reached the end of the mount option string */
-                               options = end;
+                               /* Reset var options to point to next element */
+                               if (tmp_end) {
+                                       tmp_end[0] = '\0';
+                                       options = (char *) &tmp_end[1];
+                               } else
+                                       /* Reached the end of the mount option
+                                        * string */
+                                       options = end;
+                       }
 
                        /* Now build new password string */
                        temp_len = strlen(value);
@@ -3493,18 +3495,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
         * MS-CIFS indicates that servers are only limited by the client's
         * bufsize for reads, testing against win98se shows that it throws
         * INVALID_PARAMETER errors if you try to request too large a read.
+        * OS/2 just sends back short reads.
         *
-        * If the server advertises a MaxBufferSize of less than one page,
-        * assume that it also can't satisfy reads larger than that either.
-        *
-        * FIXME: Is there a better heuristic for this?
+        * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+        * it can't handle a read request larger than its MaxBufferSize either.
         */
        if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
                defsize = CIFS_DEFAULT_IOSIZE;
        else if (server->capabilities & CAP_LARGE_READ_X)
                defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
-       else if (server->maxBuf >= PAGE_CACHE_SIZE)
-               defsize = CIFSMaxBufSize;
        else
                defsize = server->maxBuf - sizeof(READ_RSP);
 
index 69f994a7d5249589bf594adaa4780bca967f08b2..0dbe58a8b172c1de225b3457600b50d644b1da35 100644 (file)
@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
        if (!IS_ERR(*lower_file))
                goto out;
-       if (flags & O_RDONLY) {
+       if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
        }
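
The eCryptfs fix above replaces the test "flags & O_RDONLY" with a comparison against O_ACCMODE: O_RDONLY is defined as 0, so the old test could never be true. A tiny standalone demonstration of why the access mode has to be masked and compared rather than tested as a bit:

#include <stdio.h>
#include <fcntl.h>

int main(void)
{
        int flags = O_RDONLY | O_NONBLOCK;

        /* O_RDONLY is 0, so this expression is always 0... */
        printf("flags & O_RDONLY                -> %d\n", flags & O_RDONLY);
        /* ...the access mode must be masked out and compared instead. */
        printf("(flags & O_ACCMODE) == O_RDONLY -> %d\n",
               (flags & O_ACCMODE) == O_RDONLY);
        return 0;
}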
index 3a06f4043df42a811add69fd7ede7b582a94c347..c0038f6566d4df1493d9321821a53adb62b902ab 100644 (file)
@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
                goto out_unlock_daemon;
        }
        daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+       file->private_data = daemon;
        atomic_inc(&ecryptfs_num_miscdev_opens);
 out_unlock_daemon:
        mutex_unlock(&daemon->mux);
@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
 
        mutex_lock(&ecryptfs_daemon_hash_mux);
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon)
+               daemon = file->private_data;
        mutex_lock(&daemon->mux);
-       BUG_ON(daemon->pid != task_pid(current));
        BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
        daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
        atomic_dec(&ecryptfs_num_miscdev_opens);
@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
                          struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
                          u16 msg_flags, struct ecryptfs_daemon *daemon)
 {
-       int rc = 0;
+       struct ecryptfs_message *msg;
 
-       mutex_lock(&msg_ctx->mux);
-       msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
-                              GFP_KERNEL);
-       if (!msg_ctx->msg) {
-               rc = -ENOMEM;
+       msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
+       if (!msg) {
                printk(KERN_ERR "%s: Out of memory whilst attempting "
                       "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
-                      (sizeof(*msg_ctx->msg) + data_size));
-               goto out_unlock;
+                      (sizeof(*msg) + data_size));
+               return -ENOMEM;
        }
+
+       mutex_lock(&msg_ctx->mux);
+       msg_ctx->msg = msg;
        msg_ctx->msg->index = msg_ctx->index;
        msg_ctx->msg->data_len = data_size;
        msg_ctx->type = msg_type;
        memcpy(msg_ctx->msg->data, data, data_size);
        msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
-       mutex_lock(&daemon->mux);
        list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+       mutex_unlock(&msg_ctx->mux);
+
+       mutex_lock(&daemon->mux);
        daemon->num_queued_msg_ctx++;
        wake_up_interruptible(&daemon->wait);
        mutex_unlock(&daemon->mux);
-out_unlock:
-       mutex_unlock(&msg_ctx->mux);
-       return rc;
+
+       return 0;
 }
 
 /*
@@ -269,8 +274,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
+       if (task_pid(current) != daemon->pid) {
+               mutex_unlock(&daemon->mux);
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EPERM;
+       }
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
                rc = 0;
                mutex_unlock(&ecryptfs_daemon_hash_mux);
@@ -307,9 +320,6 @@ check_list:
                 * message from the queue; try again */
                goto check_list;
        }
-       BUG_ON(euid != daemon->euid);
-       BUG_ON(current_user_ns() != daemon->user_ns);
-       BUG_ON(task_pid(current) != daemon->pid);
        msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
                                   struct ecryptfs_msg_ctx, daemon_out_list);
        BUG_ON(!msg_ctx);
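
The ecryptfs_send_miscdev() rework above moves the kmalloc out from under msg_ctx->mux and queues the message before taking daemon->mux, so no mutex is held across the allocation and the two locks are no longer nested. A rough userspace sketch of the same shape, with allocation outside the critical section and the lock held only for the list update; the struct and function names here are invented for the illustration:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct msg {
        struct msg *next;
        size_t len;
        char data[];
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *queue_head;

static int send_msg(const char *data, size_t len)
{
        /* Allocate before taking any lock; on failure there is no
         * unlock path to get wrong. */
        struct msg *m = malloc(sizeof(*m) + len);

        if (!m)
                return -1;
        m->len = len;
        memcpy(m->data, data, len);

        /* Hold the lock only for the list manipulation itself. */
        pthread_mutex_lock(&queue_lock);
        m->next = queue_head;
        queue_head = m;
        pthread_mutex_unlock(&queue_lock);
        return 0;
}

int main(void)
{
        if (send_msg("hello", 5) == 0)
                printf("queued %zu bytes\n", queue_head->len);
        return 0;
}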
index a3d81ebf6d864a8c2189147e5771c435473b2e42..0038b32cb36276d537f2ec81a46469bf3ad221b1 100644 (file)
@@ -738,22 +738,21 @@ static int
 fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
 {
        int len = *lenp;
-       u32 ipos_h, ipos_m, ipos_l;
+       struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+       loff_t i_pos;
 
        if (len < 5) {
                *lenp = 5;
                return 255; /* no room */
        }
 
-       ipos_h = MSDOS_I(inode)->i_pos >> 8;
-       ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
-       ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+       i_pos = fat_i_pos_read(sbi, inode);
        *lenp = 5;
        fh[0] = inode->i_ino;
        fh[1] = inode->i_generation;
-       fh[2] = ipos_h;
-       fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-       fh[4] = ipos_l;
+       fh[2] = i_pos >> 8;
+       fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
+       fh[4] = (i_pos & 0x0f) << 28;
        if (parent)
                fh[4] |= MSDOS_I(parent)->i_logstart;
        return 3;
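
The fat_encode_fh() change above reads i_pos once via fat_i_pos_read() and packs it straight into the file handle words instead of going through precomputed ipos_h/ipos_m/ipos_l pieces. The layout is unchanged: bits 8..39 of i_pos go into one word, bits 4..7 into the top nibble of the next word (above i_logstart), and bits 0..3 into the top nibble of the last word. A standalone sketch of that packing, keeping only the three words that carry i_pos; the unpack helper exists just to check the round trip and is not the kernel's decoder:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a 40-bit i_pos plus a 28-bit logstart into three 32-bit words,
 * mirroring the layout used in the hunk above. */
static void pack(uint64_t i_pos, uint32_t logstart, uint32_t fh[3])
{
        fh[0] = (uint32_t)(i_pos >> 8);                       /* bits 8..39 */
        fh[1] = (uint32_t)((i_pos & 0xf0) << 24) | logstart;  /* bits 4..7 */
        fh[2] = (uint32_t)((i_pos & 0x0f) << 28);             /* bits 0..3 */
}

static uint64_t unpack(const uint32_t fh[3])
{
        return ((uint64_t)fh[0] << 8) |
               ((fh[1] >> 24) & 0xf0) |
               (fh[2] >> 28);
}

int main(void)
{
        uint64_t i_pos = 0xABCDEF1234ULL;       /* any 40-bit value */
        uint32_t fh[3];

        pack(i_pos, 0x0123456, fh);
        assert(unpack(fh) == i_pos);
        printf("round-tripped i_pos=0x%llx\n", (unsigned long long)unpack(fh));
        return 0;
}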
index 814c51d0de4739e4b89e9091e00c17f284c0e2ba..fce6238d52c1bf742307325a588d545e169508e4 100644 (file)
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        case F_WRLCK:
                return generic_add_lease(filp, arg, flp);
        default:
-               BUG();
+               return -EINVAL;
        }
 }
 EXPORT_SYMBOL(generic_setlease);
index 9a4cbfc85d81fd203efe4db51f8897eb6bf45185..48253372ab1d115def0b81f56b097db6e98a0f88 100644 (file)
@@ -484,6 +484,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                if (!nfs_pageio_add_request(&desc, req)) {
+                       nfs_list_remove_request(req);
                        nfs_list_add_request(req, &failed);
                        spin_lock(cinfo.lock);
                        dreq->flags = 0;
@@ -494,8 +495,11 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
        }
        nfs_pageio_complete(&desc);
 
-       while (!list_empty(&failed))
+       while (!list_empty(&failed)) {
+               req = nfs_list_entry(failed.next);
+               nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
+       }
 
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
index 906f09c7d842522f5defd230c997527b1d94c8e8..06228192f64efb52e8466afd2334bc04952d50a4 100644 (file)
@@ -2860,6 +2860,8 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
 
        dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
+       mount_info->fill_super = nfs4_fill_super;
+
        export_path = data->nfs_server.export_path;
        data->nfs_server.export_path = "/";
        root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
index 81a4cd22f80be84a06eac2b0fbf4348385d76262..4f7795fb5fc0b78a6f58d28f2de69356ff5e197d 100644 (file)
@@ -456,7 +456,7 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
        stats->ls_gets++;
        stats->ls_total += ktime_to_ns(kt);
        /* overflow */
-       if (unlikely(stats->ls_gets) == 0) {
+       if (unlikely(stats->ls_gets == 0)) {
                stats->ls_gets++;
                stats->ls_total = ktime_to_ns(kt);
        }
@@ -3932,6 +3932,8 @@ unqueue:
 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres)
 {
+       unsigned long flags;
+
        assert_spin_locked(&lockres->l_lock);
 
        if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -3945,21 +3947,22 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
 
        lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&lockres->l_blocked_list)) {
                list_add_tail(&lockres->l_blocked_list,
                              &osb->blocked_lock_list);
                osb->blocked_lock_count++;
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
 {
        unsigned long processed;
+       unsigned long flags;
        struct ocfs2_lock_res *lockres;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* grab this early so we know to try again if a state change and
         * wake happens part-way through our work  */
        osb->dc_work_sequence = osb->dc_wake_sequence;
@@ -3972,38 +3975,40 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
                osb->blocked_lock_count--;
-               spin_unlock(&osb->dc_task_lock);
+               spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
                BUG_ON(!processed);
                processed--;
 
                ocfs2_process_blocked_lock(osb, lockres);
 
-               spin_lock(&osb->dc_task_lock);
+               spin_lock_irqsave(&osb->dc_task_lock, flags);
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
 {
        int empty = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&osb->blocked_lock_list))
                empty = 1;
 
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        return empty;
 }
 
 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
 {
        int should_wake = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (osb->dc_work_sequence != osb->dc_wake_sequence)
                should_wake = 1;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
        return should_wake;
 }
@@ -4033,10 +4038,12 @@ static int ocfs2_downconvert_thread(void *arg)
 
 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
 {
-       spin_lock(&osb->dc_task_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* make sure the voting thread gets a swipe at whatever changes
         * the caller may have made to the voting state */
        osb->dc_wake_sequence++;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        wake_up(&osb->dc_event);
 }
index 2f5b92ef0e533146007b49d21dd705a242125dc5..70b5863a2d64e05cde6474bd387112f3b470b5e7 100644 (file)
@@ -923,8 +923,6 @@ out_unlock:
 
        ocfs2_inode_unlock(inode, 0);
 out:
-       if (ret && ret != -ENXIO)
-               ret = -ENXIO;
        return ret;
 }
 
index 061591a3ab08a673d73544e819e7b7fd19b2689a..7602783d7f41c9a01827f16446da43e036e6a7cb 100644 (file)
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
-       if (file->f_flags & O_SYNC)
+       if (file && (file->f_flags & O_SYNC))
                handle->h_sync = 1;
 
        ocfs2_commit_trans(osb, handle);
@@ -2422,8 +2422,10 @@ out_dio:
                unaligned_dio = 0;
        }
 
-       if (unaligned_dio)
+       if (unaligned_dio) {
+               ocfs2_iocb_clear_unaligned_aio(iocb);
                atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+       }
 
 out:
        if (rw_level != -1)
index 92fcd575775a0d1123e902b8499cd1b26cd52aab..0a86e302655f3384435ab4843fad50b6bf1a7cae 100644 (file)
@@ -399,8 +399,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
                              msecs_to_jiffies(oinfo->dqi_syncms));
 
 out_err:
-       if (status)
-               mlog_errno(status);
        return status;
 out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
index d6c79a0dffc7b0827b09562e11fa0f610af5657d..1540632d8387fe98a51d0193201346acb18ae70e 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -397,10 +397,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 {
        struct file *file;
        struct inode *inode;
-       int error;
+       int error, fput_needed;
 
        error = -EBADF;
-       file = fget(fd);
+       file = fget_raw_light(fd, &fput_needed);
        if (!file)
                goto out;
 
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
        if (!error)
                set_fs_pwd(current->fs, &file->f_path);
 out_putf:
-       fput(file);
+       fput_light(file, fput_needed);
 out:
        return error;
 }
index fbb0b478a346fbc77c854696c5e0e508760125ad..d5378d028589843e1cfef1efd5dba0b8cc4072cb 100644 (file)
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
                /* prevent the page from being discarded on memory pressure */
                SetPageDirty(page);
+               SetPageUptodate(page);
 
                unlock_page(page);
                put_page(page);
index c9f1318a3b820b363526576036c4894205552921..7bf08fa22ec9ab122bad5438a02bd78db6f1e885 100644 (file)
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+       spd->nr_pages_max = buffers;
+       if (buffers <= PIPE_DEF_BUFFERS)
                return 0;
 
-       spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-       spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+       spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+       spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
 
        if (spd->pages && spd->partial)
                return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
        return -ENOMEM;
 }
 
-void splice_shrink_spd(struct pipe_inode_info *pipe,
-                      struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                return;
 
        kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
         * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return error;
 }
 
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &default_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
        res = -ENOMEM;
        vec = __vec;
-       if (pipe->buffers > PIPE_DEF_BUFFERS) {
-               vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+       if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+               vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
                if (!vec)
                        goto shrink_ret;
        }
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        offset = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+       for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
 
                page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
        if (vec != __vec)
                kfree(vec);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return res;
 
 err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
                                            spd.partial, false,
-                                           pipe->buffers);
+                                           spd.nr_pages_max);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
        else
                ret = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
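
The fs/splice.c changes above stop re-reading pipe->buffers at shrink time: the capacity is sampled once into spd.nr_pages_max when the arrays are sized, so a concurrent pipe resize cannot make splice_shrink_spd() disagree with splice_grow_spd() about whether the arrays were heap-allocated. A simplified userspace sketch of the pattern, snapshotting a capacity into the descriptor and consulting only the snapshot afterwards; the struct and variable names are invented for the illustration:

#include <stdio.h>
#include <stdlib.h>

#define DEF_BUFFERS 16

struct spd {
        void **pages;              /* heap-allocated only when nr_pages_max > DEF_BUFFERS */
        unsigned int nr_pages_max; /* snapshot of the capacity at grow time */
};

/* The "pipe" capacity may be changed concurrently elsewhere; read it once. */
static volatile unsigned int pipe_buffers = 64;

static int grow(struct spd *spd, void **stack_pages)
{
        unsigned int buffers = pipe_buffers;    /* single snapshot */

        spd->nr_pages_max = buffers;
        if (buffers <= DEF_BUFFERS) {
                spd->pages = stack_pages;
                return 0;
        }
        spd->pages = calloc(buffers, sizeof(*spd->pages));
        return spd->pages ? 0 : -1;
}

static void shrink(struct spd *spd)
{
        /* Decide from the snapshot, never from pipe_buffers, which may
         * have changed since grow() ran. */
        if (spd->nr_pages_max <= DEF_BUFFERS)
                return;
        free(spd->pages);
}

int main(void)
{
        void *stack_pages[DEF_BUFFERS];
        struct spd spd;

        if (grow(&spd, stack_pages))
                return 1;
        pipe_buffers = 8;       /* simulate a concurrent resize */
        shrink(&spd);           /* still frees: the snapshot says 64 */
        printf("alloc/free stayed consistent across the resize\n");
        return 0;
}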
 
index 2314ad8b3c9cced6a4679441d7c6b25afe500348..b1a520ec8b59dd0975bc4e738ce2398ca19dc363 100644 (file)
@@ -140,6 +140,7 @@ struct kiocb {
                (x)->ki_dtor = NULL;                    \
                (x)->ki_obj.tsk = tsk;                  \
                (x)->ki_user_data = 0;                  \
+               (x)->private = NULL;                    \
        } while (0)
 
 #define AIO_RING_MAGIC                 0xa10a10a1
index ba43f408baa38907a8f63a64314e07bb622a6e7f..07954b05b86cc3ac9c4f72aa97584b551914631e 100644 (file)
@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);
 
 /*
index 324fe08ea3b140b7b8b92f7129ad334df2e260d5..6d6795d46a7509f01e4a12993e9c7de34b02d975 100644 (file)
@@ -91,6 +91,11 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                  unsigned long size,
                                  unsigned long align,
                                  unsigned long goal);
+void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+                                 unsigned long size,
+                                 unsigned long align,
+                                 unsigned long goal,
+                                 unsigned long limit);
 extern void *__alloc_bootmem_low(unsigned long size,
                                 unsigned long align,
                                 unsigned long goal);
index f07fc2d081598ba7c2432a2306081473d3e0bbe1..2e31e8b3a190bb652bb597104139d2f2b440d347 100644 (file)
@@ -22,8 +22,8 @@
 /* Gpio pin is open source */
 #define GPIOF_OPEN_SOURCE      (1 << 3)
 
-#define GPIOF_EXPORT           (1 << 2)
-#define GPIOF_EXPORT_CHANGEABLE        (1 << 3)
+#define GPIOF_EXPORT           (1 << 4)
+#define GPIOF_EXPORT_CHANGEABLE        (1 << 5)
 #define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
 #define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
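
The gpio.h hunk above renumbers GPIOF_EXPORT and GPIOF_EXPORT_CHANGEABLE because bits 2 and 3 were already taken by the open-drain/open-source flags, so the values overlapped. A short standalone illustration of what such an overlap does to flag tests; the flag names here are stand-ins, not the kernel definitions:

#include <stdio.h>

/* Overlapping definitions, as before the fix: */
#define FLAG_OPEN_SOURCE_OLD   (1 << 3)
#define FLAG_EXPORT_CHANGE_OLD (1 << 3)

/* Disjoint definitions, as after the fix: */
#define FLAG_OPEN_SOURCE_NEW   (1 << 3)
#define FLAG_EXPORT_CHANGE_NEW (1 << 5)

int main(void)
{
        unsigned int flags_old = FLAG_OPEN_SOURCE_OLD;
        unsigned int flags_new = FLAG_OPEN_SOURCE_NEW;

        /* With the overlap, asking only for open-source also looks like a
         * request to export the GPIO with a changeable direction. */
        printf("old: export-changeable? %s\n",
               (flags_old & FLAG_EXPORT_CHANGE_OLD) ? "yes (false positive)" : "no");
        printf("new: export-changeable? %s\n",
               (flags_new & FLAG_EXPORT_CHANGE_NEW) ? "yes" : "no");
        return 0;
}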
 
index fd0dc30c9f154af94155b8c8c47e0a228fbd2573..cc07d2777bbe6b11a632840c5f0a867436bbeac5 100644 (file)
@@ -165,6 +165,7 @@ enum  hrtimer_base_type {
  * @lock:              lock protecting the base and associated clock bases
  *                     and timers
  * @active_bases:      Bitfield to mark bases with active timers
+ * @clock_was_set:     Indicates that clock was set from irq context.
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
  * @hres_active:       State of high resolution mode
@@ -177,7 +178,8 @@ enum  hrtimer_base_type {
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
-       unsigned long                   active_bases;
+       unsigned int                    active_bases;
+       unsigned int                    clock_was_set;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
        int                             hres_active;
@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
 # define MONOTONIC_RES_NSEC    HIGH_RES_NSEC
 # define KTIME_MONOTONIC_RES   KTIME_HIGH_RES
 
+extern void clock_was_set_delayed(void);
+
 #else
 
 # define MONOTONIC_RES_NSEC    LOW_RES_NSEC
@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
        return 0;
 }
+
+static inline void clock_was_set_delayed(void) { }
+
 #endif
 
 extern void clock_was_set(void);
@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
 extern ktime_t ktime_get_boottime(void);
 extern ktime_t ktime_get_monotonic_offset(void);
+extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
index a81671453575d800ec3d102e1aa6a8bf517073ce..2740d080ec6b7607fa1daba2bb2f72395fe34a7f 100644 (file)
@@ -116,6 +116,7 @@ struct input_keymap_entry {
 
 /**
  * EVIOCGMTSLOTS(len) - get MT slot values
+ * @len: size of the data buffer in bytes
  *
  * The ioctl buffer argument should be binary equivalent to
  *
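
For context, a hedged usage sketch of EVIOCGMTSLOTS: the buffer layout the truncated comment above refers to is a __u32 ABS_MT code followed by one __s32 value per slot; the device path and the 64-slot cap here are arbitrary choices for the example.

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#define MAX_SLOTS 64                    /* arbitrary upper bound for the demo */

int main(void)
{
        struct {
                unsigned int code;      /* ABS_MT_* axis to query */
                int values[MAX_SLOTS];  /* one value per MT slot  */
        } req;
        int fd = open("/dev/input/event0", O_RDONLY);  /* assumed device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        req.code = ABS_MT_POSITION_X;
        /* @len is the size of the whole buffer, code field included. */
        if (ioctl(fd, EVIOCGMTSLOTS(sizeof(req)), &req) < 0) {
                perror("EVIOCGMTSLOTS");
                return 1;
        }
        printf("slot 0: x = %d\n", req.values[0]);
        return 0;
}
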
index c4464356b35b0af21eaafe6cbd1d2d7b4f549814..96c158a37d3e5ead53765e4bfa2280a82c79be5e 100644 (file)
@@ -815,7 +815,7 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
-int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
@@ -824,7 +824,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
 
-static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
        return -EINVAL;
 }
index a6bb102351486a15a8e1bce8a2da4fbfea4037c6..19dc455b4f3dd072d0e58abdfed2de6f8ae5727c 100644 (file)
@@ -50,9 +50,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
-int memblock_free_reserved_regions(void);
-int memblock_reserve_reserved_regions(void);
-
+phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
index 2427706f78b4d7043b5476b310d5a203631dbf90..68c569fcbb66ff751d689c57e19f196d33b2b0f8 100644 (file)
@@ -694,7 +694,7 @@ typedef struct pglist_data {
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
-       struct task_struct *kswapd;
+       struct task_struct *kswapd;     /* Protected by lock_memory_hotplug() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
 } pg_data_t;
index fefb4e19bf6a476a634d5384814d347b1179094c..d8c379dba6adbb36ae9df6c18adea304e2c6b45c 100644 (file)
@@ -176,8 +176,6 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
        /* Provide indication device is assigned by a Virtual Machine Manager */
        PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
-       /* Device causes system crash if in D3 during S3 sleep */
-       PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
 };
 
 enum pci_irq_reroute_variant {
index 3988012255dc5bf0562758d703f4124656dbfc31..289760f424aaa3247d2e4f66841334c67295c9ef 100644 (file)
  * Changing LSM security domain is considered a new privilege.  So, for example,
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
+ *
+ * See Documentation/prctl/no_new_privs.txt for more details.
  */
 #define PR_SET_NO_NEW_PRIVS    38
 #define PR_GET_NO_NEW_PRIVS    39
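
Since the hunk above only adds the cross-reference, here is a brief usage sketch of the two prctl options it documents; the numeric fallbacks simply repeat the values 38/39 shown above, for builds against older headers.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif
#ifndef PR_GET_NO_NEW_PRIVS
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
        /* Once set, the flag persists across fork/execve and cannot be
         * cleared; setuid bits, file capabilities and LSM domain
         * transitions no longer grant extra privileges on execve. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) < 0) {
                perror("PR_SET_NO_NEW_PRIVS");
                return 1;
        }
        printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
        return 0;
}
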
index 26d1a47591f1534306160811967248ed9ecfcc12..9cac722b169c9a1f5dbcae489d7b4c43a4fbe1a2 100644 (file)
@@ -184,7 +184,6 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
-extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
index 854dc4c5c27151adeaba69ddd98ac2b91831c1d4..4e56a9c69a356ca73fcd06cf775f337276f77f2e 100644 (file)
@@ -87,6 +87,10 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
+static inline void rcu_preempt_note_context_switch(void)
+{
+}
+
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
        *delta_jiffies = ULONG_MAX;
@@ -95,6 +99,7 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
+void rcu_preempt_note_context_switch(void);
 int rcu_preempt_needs_cpu(void);
 
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
@@ -108,6 +113,7 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 static inline void rcu_note_context_switch(int cpu)
 {
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch();
 }
 
 /*
index a8e50e44203c868d1957ba6170e9074569f0235e..82a673905edb12ee0ad6e466423439a31b407c49 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
 
 /* The feature bitmap for virtio rpmsg */
 #define VIRTIO_RPMSG_F_NS      0 /* RP supports name service notifications */
@@ -120,7 +122,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
 /**
  * struct rpmsg_endpoint - binds a local rpmsg address to its user
  * @rpdev: rpmsg channel device
+ * @refcount: when this drops to zero, the ept is deallocated
  * @cb: rx callback handler
+ * @cb_lock: must be taken before accessing/changing @cb
  * @addr: local rpmsg address
  * @priv: private data for the driver's use
  *
@@ -140,7 +144,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
  */
 struct rpmsg_endpoint {
        struct rpmsg_channel *rpdev;
+       struct kref refcount;
        rpmsg_rx_cb_t cb;
+       struct mutex cb_lock;
        u32 addr;
        void *priv;
 };
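
The two new fields above follow a common lifetime pattern: the reference count keeps the endpoint alive while a message may still be delivered to it, and the lock serializes invoking the callback against clearing it. A rough single-threaded userspace analogue (plain pthreads and an int standing in for kref/mutex; none of these names are rpmsg API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
        int refcount;                   /* stands in for struct kref     */
        pthread_mutex_t cb_lock;        /* stands in for struct mutex    */
        void (*cb)(void *data);         /* rx callback, may be cleared   */
};

static void ept_put(struct endpoint *ept)
{
        if (--ept->refcount == 0) {     /* kref_put() does this atomically */
                pthread_mutex_destroy(&ept->cb_lock);
                free(ept);
        }
}

static void deliver(struct endpoint *ept, void *data)
{
        ept->refcount++;                /* keep ept alive during delivery */
        pthread_mutex_lock(&ept->cb_lock);
        if (ept->cb)                    /* cb may have been cleared meanwhile */
                ept->cb(data);
        pthread_mutex_unlock(&ept->cb_lock);
        ept_put(ept);
}

static void print_cb(void *data) { printf("got: %s\n", (char *)data); }

int main(void)
{
        struct endpoint *ept = calloc(1, sizeof(*ept));

        pthread_mutex_init(&ept->cb_lock, NULL);
        ept->refcount = 1;              /* creator's reference */
        ept->cb = print_cb;

        deliver(ept, "hello");

        pthread_mutex_lock(&ept->cb_lock);
        ept->cb = NULL;                 /* "destroy": callback must not run after this */
        pthread_mutex_unlock(&ept->cb_lock);
        ept_put(ept);                   /* drop creator's reference, frees ept */
        return 0;
}
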
index 4059c0f33f07a7454d545753281898d92e2d657f..4a1f493e0feff18485fc7ccff11da4e7eb12dde7 100644 (file)
@@ -1871,22 +1871,12 @@ static inline void rcu_copy_process(struct task_struct *p)
        INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-       if (prev->rcu_read_lock_nesting != 0)
-               rcu_preempt_note_context_switch();
-}
-
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-}
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -1909,6 +1899,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
index 26e5b613deda9e63816b58c59f53f06d7dccd50c..09a545a7dfa39bcd7f736f446358d4c3b112aae4 100644 (file)
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
-       int nr_pages;                   /* number of pages in map */
+       int nr_pages;                   /* number of populated pages in map */
+       unsigned int nr_pages_max;      /* pages[] & partial[] arrays size */
        unsigned int flags;             /* splice flags */
        const struct pipe_buf_operations *ops;/* ops associated with output pipe */
        void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
index e4652fe5895863d956a52e436d10f1fc3724415b..fecdf31816f2fc6a834a060cfdd39158dcb30bc2 100644 (file)
@@ -912,6 +912,9 @@ struct sctp_transport {
                /* Is this structure kfree()able? */
                malloced:1;
 
+       /* Has this transport moved the ctsn since we last sacked */
+       __u32 sack_generation;
+
        struct flowi fl;
 
        /* This is the peer's IP address and port. */
@@ -1584,6 +1587,7 @@ struct sctp_association {
                 */
                __u8    sack_needed;     /* Do we need to sack the peer? */
                __u32   sack_cnt;
+               __u32   sack_generation;
 
                /* These are capabilities which our peer advertised.  */
                __u8    ecn_capable:1,      /* Can peer do ECN? */
index e7728bc14ccfde5bd85c3b08f990487af66ab072..2c5d2b4d5d1eb51542df26094700250ab6191070 100644 (file)
@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
 int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+                    struct sctp_transport *trans);
 
 /* Mark this TSN and all lower as seen. */
 void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
index f4f1c96dca726ff2f00211994dcf7381ef55b5b0..10ce74f589c5fd57622f630a17fbbc7e736d24e4 100644 (file)
@@ -163,6 +163,8 @@ enum ata_command_set {
         ATAPI_COMMAND_SET = 1,
 };
 
+#define ATA_RESP_FIS_SIZE 24
+
 struct sata_device {
         enum   ata_command_set command_set;
         struct smp_resp        rps_resp; /* report_phy_sata_resp */
@@ -171,7 +173,7 @@ struct sata_device {
 
        struct ata_port *ap;
        struct ata_host ata_host;
-       struct ata_taskfile tf;
+       u8     fis[ATA_RESP_FIS_SIZE];
 };
 
 enum {
@@ -537,7 +539,7 @@ enum exec_status {
  */
 struct ata_task_resp {
        u16  frame_len;
-       u8   ending_fis[24];      /* dev to host or data-in */
+       u8   ending_fis[ATA_RESP_FIS_SIZE];       /* dev to host or data-in */
 };
 
 #define SAS_STATUS_BUF_SIZE 96
index 1e1198546c725d43a7ff6baa657c25ef364d73ee..ac06cc595890ef87a8e01632f5872ab11e4057f2 100644 (file)
@@ -134,10 +134,16 @@ struct scsi_cmnd {
 
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
+       struct scsi_driver **sdp;
+
        if (!cmd->request->rq_disk)
                return NULL;
 
-       return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+       sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
+       if (!sdp)
+               return NULL;
+
+       return *sdp;
 }
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
index 2097684cf19474aee676513af39de14035c4fce7..b303dfc7dce0703299c3e8840e91d101497495ec 100644 (file)
@@ -901,13 +901,10 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                mutex_unlock(&cgroup_mutex);
 
                /*
-                * We want to drop the active superblock reference from the
-                * cgroup creation after all the dentry refs are gone -
-                * kill_sb gets mighty unhappy otherwise.  Mark
-                * dentry->d_fsdata with cgroup_diput() to tell
-                * cgroup_d_release() to call deactivate_super().
+                * Drop the active superblock reference that we took when we
+                * created the cgroup
                 */
-               dentry->d_fsdata = cgroup_diput;
+               deactivate_super(cgrp->root->sb);
 
                /*
                 * if we're getting rid of the cgroup, refcount should ensure
@@ -933,13 +930,6 @@ static int cgroup_delete(const struct dentry *d)
        return 1;
 }
 
-static void cgroup_d_release(struct dentry *dentry)
-{
-       /* did cgroup_diput() tell me to deactivate super? */
-       if (dentry->d_fsdata == cgroup_diput)
-               deactivate_super(dentry->d_sb);
-}
-
 static void remove_dir(struct dentry *d)
 {
        struct dentry *parent = dget(d->d_parent);
@@ -1547,7 +1537,6 @@ static int cgroup_get_rootdir(struct super_block *sb)
        static const struct dentry_operations cgroup_dops = {
                .d_iput = cgroup_diput,
                .d_delete = cgroup_delete,
-               .d_release = cgroup_d_release,
        };
 
        struct inode *inode =
@@ -3894,8 +3883,12 @@ static void css_dput_fn(struct work_struct *work)
 {
        struct cgroup_subsys_state *css =
                container_of(work, struct cgroup_subsys_state, dput_work);
+       struct dentry *dentry = css->cgroup->dentry;
+       struct super_block *sb = dentry->d_sb;
 
-       dput(css->cgroup->dentry);
+       atomic_inc(&sb->s_active);
+       dput(dentry);
+       deactivate_super(sb);
 }
 
 static void init_cgroup_css(struct cgroup_subsys_state *css,
index ab5211b9e622cf94d07b7bfb4ccfd9bac85e7b79..f00e319d8376289f0273fc1d301f4c1ebb132c86 100644 (file)
@@ -304,12 +304,17 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        }
 
        err = arch_dup_task_struct(tsk, orig);
-       if (err)
-               goto out;
 
+       /*
+        * We defer looking at err because we will need this setup
+        * for the cleanup path to work correctly.
+        */
        tsk->stack = ti;
-
        setup_thread_stack(tsk, orig);
+
+       if (err)
+               goto out;
+
        clear_user_return_notifier(tsk);
        clear_tsk_need_resched(tsk);
        stackend = end_of_stack(tsk);
index ae34bf51682b4a204de93f62943055350cd5c4d0..6db7a5ed52b58727d33293853053c0fee1d049fe 100644 (file)
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
        return 0;
 }
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+       return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
 /*
  * Retrigger next event is called after clock was set
  *
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-       struct timespec realtime_offset, xtim, wtm, sleep;
 
        if (!hrtimer_hres_active())
                return;
 
-       /* Optimized out for !HIGH_RES */
-       get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
-       set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-       /* Adjust CLOCK_REALTIME offset */
        raw_spin_lock(&base->lock);
-       base->clock_base[HRTIMER_BASE_REALTIME].offset =
-               timespec_to_ktime(realtime_offset);
-       base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-               timespec_to_ktime(sleep);
-
+       hrtimer_update_base(base);
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
 }
@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
                base->clock_base[i].resolution = KTIME_HIGH_RES;
 
        tick_setup_sched_timer();
-
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
        return 1;
 }
 
+/*
+ * Called from timekeeping code to reprogram the hrtimer interrupt
+ * device. If called from the timer interrupt context, we defer it to
+ * softirq context.
+ */
+void clock_was_set_delayed(void)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       cpu_base->clock_was_set = 1;
+       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;
 
-       entry_time = now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
 retry:
        expires_next.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
@@ -1330,8 +1339,12 @@ retry:
         * We need to prevent that we loop forever in the hrtimer
         * interrupt routine. We give it 3 attempts to avoid
         * overreacting on some spurious event.
+        *
+        * Acquire base lock for updating the offsets and retrieving
+        * the current time.
         */
-       now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
        cpu_base->nr_retries++;
        if (++retries < 3)
                goto retry;
@@ -1343,6 +1356,7 @@ retry:
         */
        cpu_base->nr_hangs++;
        cpu_base->hang_detected = 1;
+       raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
        if (delta.tv64 > cpu_base->max_hang_time.tv64)
                cpu_base->max_hang_time = delta;
@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       if (cpu_base->clock_was_set) {
+               cpu_base->clock_was_set = 0;
+               clock_was_set();
+       }
+
        hrtimer_peek_ahead_timers();
 }
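
The clock_was_set_delayed() path added above defers the expensive rebase work out of hard-irq context: the interrupt only records the fact and raises HRTIMER_SOFTIRQ, and run_hrtimer_softirq() later performs the real clock_was_set(). A minimal userspace sketch of that defer-to-softirq pattern (names mirror the patch, but nothing here is kernel API):

#include <stdio.h>

static int clock_was_set_pending;       /* stands in for cpu_base->clock_was_set */

static void clock_was_set(void)         /* expensive retrigger, runs with irqs on */
{
        printf("reprogramming hrtimer bases after clock change\n");
}

static void clock_was_set_delayed(void) /* called from timer-irq context */
{
        clock_was_set_pending = 1;      /* cheap: just remember it */
        /* __raise_softirq_irqoff(HRTIMER_SOFTIRQ) would happen here */
}

static void run_hrtimer_softirq(void)   /* softirq handler */
{
        if (clock_was_set_pending) {
                clock_was_set_pending = 0;
                clock_was_set();
        }
}

int main(void)
{
        clock_was_set_delayed();        /* "irq" notices the clock jumped   */
        run_hrtimer_softirq();          /* deferred work happens later here */
        return 0;
}
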
 
index dba18211685ed199b1ea88b9c2439ce8986a8656..177fa49357a5d346f1296ca431d4a674c7659508 100644 (file)
@@ -194,8 +194,10 @@ static int console_may_schedule;
  */
 
 enum log_flags {
-       LOG_DEFAULT = 0,
-       LOG_NOCONS = 1,         /* already flushed, do not print to console */
+       LOG_NOCONS      = 1,    /* already flushed, do not print to console */
+       LOG_NEWLINE     = 2,    /* text ended with a newline */
+       LOG_PREFIX      = 4,    /* text started with a prefix */
+       LOG_CONT        = 8,    /* text is a fragment of a continuation line */
 };
 
 struct log {
@@ -217,6 +219,8 @@ static DEFINE_RAW_SPINLOCK(logbuf_lock);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
+static enum log_flags syslog_prev;
+static size_t syslog_partial;
 
 /* index and sequence number of the first record stored in the buffer */
 static u64 log_first_seq;
@@ -430,20 +434,20 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        ret = mutex_lock_interruptible(&user->lock);
        if (ret)
                return ret;
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        while (user->seq == log_next_seq) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
-                       raw_spin_unlock(&logbuf_lock);
+                       raw_spin_unlock_irq(&logbuf_lock);
                        goto out;
                }
 
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                ret = wait_event_interruptible(log_wait,
                                               user->seq != log_next_seq);
                if (ret)
                        goto out;
-               raw_spin_lock(&logbuf_lock);
+               raw_spin_lock_irq(&logbuf_lock);
        }
 
        if (user->seq < log_first_seq) {
@@ -451,7 +455,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                user->idx = log_first_idx;
                user->seq = log_first_seq;
                ret = -EPIPE;
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                goto out;
        }
 
@@ -465,7 +469,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        for (i = 0; i < msg->text_len; i++) {
                unsigned char c = log_text(msg)[i];
 
-               if (c < ' ' || c >= 128)
+               if (c < ' ' || c >= 127 || c == '\\')
                        len += sprintf(user->buf + len, "\\x%02x", c);
                else
                        user->buf[len++] = c;
@@ -489,7 +493,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                                continue;
                        }
 
-                       if (c < ' ' || c >= 128) {
+                       if (c < ' ' || c >= 127 || c == '\\') {
                                len += sprintf(user->buf + len, "\\x%02x", c);
                                continue;
                        }
@@ -501,7 +505,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 
        user->idx = log_next(user->idx);
        user->seq++;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        if (len > count) {
                ret = -EINVAL;
@@ -528,7 +532,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        if (offset)
                return -ESPIPE;
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        switch (whence) {
        case SEEK_SET:
                /* the first record */
@@ -552,7 +556,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        default:
                ret = -EINVAL;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
        return ret;
 }
 
@@ -566,14 +570,14 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &log_wait, wait);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        if (user->seq < log_next_seq) {
                /* return error when data has vanished underneath us */
                if (user->seq < log_first_seq)
                        ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
                ret = POLLIN|POLLRDNORM;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        return ret;
 }
@@ -597,10 +601,10 @@ static int devkmsg_open(struct inode *inode, struct file *file)
 
        mutex_init(&user->lock);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        user->idx = log_first_idx;
        user->seq = log_first_seq;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        file->private_data = user;
        return 0;
@@ -818,15 +822,18 @@ static size_t print_time(u64 ts, char *buf)
 static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 {
        size_t len = 0;
+       unsigned int prefix = (msg->facility << 3) | msg->level;
 
        if (syslog) {
                if (buf) {
-                       len += sprintf(buf, "<%u>", msg->level);
+                       len += sprintf(buf, "<%u>", prefix);
                } else {
                        len += 3;
-                       if (msg->level > 9)
-                               len++;
-                       if (msg->level > 99)
+                       if (prefix > 999)
+                               len += 3;
+                       else if (prefix > 99)
+                               len += 2;
+                       else if (prefix > 9)
                                len++;
                }
        }
@@ -835,13 +842,26 @@ static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
        return len;
 }
 
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size)
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size)
 {
        const char *text = log_text(msg);
        size_t text_size = msg->text_len;
+       bool prefix = true;
+       bool newline = true;
        size_t len = 0;
 
+       if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
+               prefix = false;
+
+       if (msg->flags & LOG_CONT) {
+               if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
+                       prefix = false;
+
+               if (!(msg->flags & LOG_NEWLINE))
+                       newline = false;
+       }
+
        do {
                const char *next = memchr(text, '\n', text_size);
                size_t text_len;
@@ -859,16 +879,22 @@ static size_t msg_print_text(const struct log *msg, bool syslog,
                            text_len + 1 >= size - len)
                                break;
 
-                       len += print_prefix(msg, syslog, buf + len);
+                       if (prefix)
+                               len += print_prefix(msg, syslog, buf + len);
                        memcpy(buf + len, text, text_len);
                        len += text_len;
-                       buf[len++] = '\n';
+                       if (next || newline)
+                               buf[len++] = '\n';
                } else {
                        /* SYSLOG_ACTION_* buffer size only calculation */
-                       len += print_prefix(msg, syslog, NULL);
-                       len += text_len + 1;
+                       if (prefix)
+                               len += print_prefix(msg, syslog, NULL);
+                       len += text_len;
+                       if (next || newline)
+                               len++;
                }
 
+               prefix = true;
                text = next;
        } while (text);
 
@@ -887,22 +913,35 @@ static int syslog_print(char __user *buf, int size)
 
        while (size > 0) {
                size_t n;
+               size_t skip;
 
                raw_spin_lock_irq(&logbuf_lock);
                if (syslog_seq < log_first_seq) {
                        /* messages are gone, move to first one */
                        syslog_seq = log_first_seq;
                        syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
                }
                if (syslog_seq == log_next_seq) {
                        raw_spin_unlock_irq(&logbuf_lock);
                        break;
                }
+
+               skip = syslog_partial;
                msg = log_from_idx(syslog_idx);
-               n = msg_print_text(msg, true, text, LOG_LINE_MAX);
-               if (n <= size) {
+               n = msg_print_text(msg, syslog_prev, true, text, LOG_LINE_MAX);
+               if (n - syslog_partial <= size) {
+                       /* message fits into buffer, move forward */
                        syslog_idx = log_next(syslog_idx);
                        syslog_seq++;
+                       syslog_prev = msg->flags;
+                       n -= syslog_partial;
+                       syslog_partial = 0;
+               } else if (!len) {
+                       /* partial read(), remember position */
+                       n = size;
+                       syslog_partial += n;
                } else
                        n = 0;
                raw_spin_unlock_irq(&logbuf_lock);
@@ -910,17 +949,15 @@ static int syslog_print(char __user *buf, int size)
                if (!n)
                        break;
 
-               len += n;
-               size -= n;
-               buf += n;
-               n = copy_to_user(buf - n, text, n);
-
-               if (n) {
-                       len -= n;
+               if (copy_to_user(buf, text + skip, n)) {
                        if (!len)
                                len = -EFAULT;
                        break;
                }
+
+               len += n;
+               size -= n;
+               buf += n;
        }
 
        kfree(text);
@@ -941,6 +978,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                u64 next_seq;
                u64 seq;
                u32 idx;
+               enum log_flags prev;
 
                if (clear_seq < log_first_seq) {
                        /* messages are gone, move to first available one */
@@ -954,10 +992,11 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                 */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len += msg_print_text(msg, true, NULL, 0);
+                       len += msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
@@ -965,10 +1004,11 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                /* move first record forward until length fits into the buffer */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (len > size && seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len -= msg_print_text(msg, true, NULL, 0);
+                       len -= msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
@@ -977,17 +1017,19 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                next_seq = log_next_seq;
 
                len = 0;
+               prev = 0;
                while (len >= 0 && seq < next_seq) {
                        struct log *msg = log_from_idx(idx);
                        int textlen;
 
-                       textlen = msg_print_text(msg, true, text, LOG_LINE_MAX);
+                       textlen = msg_print_text(msg, prev, true, text, LOG_LINE_MAX);
                        if (textlen < 0) {
                                len = textlen;
                                break;
                        }
                        idx = log_next(idx);
                        seq++;
+                       prev = msg->flags;
 
                        raw_spin_unlock_irq(&logbuf_lock);
                        if (copy_to_user(buf + len, text, textlen))
@@ -1000,6 +1042,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                                /* messages are gone, move to next one */
                                seq = log_first_seq;
                                idx = log_first_idx;
+                               prev = 0;
                        }
                }
        }
@@ -1018,7 +1061,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
        bool clear = false;
        static int saved_console_loglevel = -1;
-       static DEFINE_MUTEX(syslog_mutex);
        int error;
 
        error = check_syslog_permissions(type, from_file);
@@ -1045,17 +1087,11 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                        error = -EFAULT;
                        goto out;
                }
-               error = mutex_lock_interruptible(&syslog_mutex);
-               if (error)
-                       goto out;
                error = wait_event_interruptible(log_wait,
                                                 syslog_seq != log_next_seq);
-               if (error) {
-                       mutex_unlock(&syslog_mutex);
+               if (error)
                        goto out;
-               }
                error = syslog_print(buf, len);
-               mutex_unlock(&syslog_mutex);
                break;
        /* Read/clear last kernel messages */
        case SYSLOG_ACTION_READ_CLEAR:
@@ -1111,6 +1147,8 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                        /* messages are gone, move to first one */
                        syslog_seq = log_first_seq;
                        syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
                }
                if (from_file) {
                        /*
@@ -1120,19 +1158,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                         */
                        error = log_next_idx - syslog_idx;
                } else {
-                       u64 seq;
-                       u32 idx;
+                       u64 seq = syslog_seq;
+                       u32 idx = syslog_idx;
+                       enum log_flags prev = syslog_prev;
 
                        error = 0;
-                       seq = syslog_seq;
-                       idx = syslog_idx;
                        while (seq < log_next_seq) {
                                struct log *msg = log_from_idx(idx);
 
-                               error += msg_print_text(msg, true, NULL, 0);
+                               error += msg_print_text(msg, prev, true, NULL, 0);
                                idx = log_next(idx);
                                seq++;
+                               prev = msg->flags;
                        }
+                       error -= syslog_partial;
                }
                raw_spin_unlock_irq(&logbuf_lock);
                break;
@@ -1400,10 +1439,9 @@ asmlinkage int vprintk_emit(int facility, int level,
        static char textbuf[LOG_LINE_MAX];
        char *text = textbuf;
        size_t text_len;
+       enum log_flags lflags = 0;
        unsigned long flags;
        int this_cpu;
-       bool newline = false;
-       bool prefix = false;
        int printed_len = 0;
 
        boot_delay_msec();
@@ -1442,7 +1480,7 @@ asmlinkage int vprintk_emit(int facility, int level,
                recursion_bug = 0;
                printed_len += strlen(recursion_msg);
                /* emit KERN_CRIT message */
-               log_store(0, 2, LOG_DEFAULT, 0,
+               log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
                          NULL, 0, recursion_msg, printed_len);
        }
 
@@ -1455,7 +1493,7 @@ asmlinkage int vprintk_emit(int facility, int level,
        /* mark and strip a trailing newline */
        if (text_len && text[text_len-1] == '\n') {
                text_len--;
-               newline = true;
+               lflags |= LOG_NEWLINE;
        }
 
        /* strip syslog prefix and extract log level or control flags */
@@ -1465,7 +1503,7 @@ asmlinkage int vprintk_emit(int facility, int level,
                        if (level == -1)
                                level = text[1] - '0';
                case 'd':       /* KERN_DEFAULT */
-                       prefix = true;
+                       lflags |= LOG_PREFIX;
                case 'c':       /* KERN_CONT */
                        text += 3;
                        text_len -= 3;
@@ -1475,22 +1513,20 @@ asmlinkage int vprintk_emit(int facility, int level,
        if (level == -1)
                level = default_message_loglevel;
 
-       if (dict) {
-               prefix = true;
-               newline = true;
-       }
+       if (dict)
+               lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-       if (!newline) {
+       if (!(lflags & LOG_NEWLINE)) {
                /*
                 * Flush the conflicting buffer. An earlier newline was missing,
                 * or another task also prints continuation lines.
                 */
-               if (cont.len && (prefix || cont.owner != current))
+               if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
                        cont_flush();
 
                /* buffer line if possible, otherwise store it right away */
                if (!cont_add(facility, level, text, text_len))
-                       log_store(facility, level, LOG_DEFAULT, 0,
+                       log_store(facility, level, lflags | LOG_CONT, 0,
                                  dict, dictlen, text, text_len);
        } else {
                bool stored = false;
@@ -1502,13 +1538,13 @@ asmlinkage int vprintk_emit(int facility, int level,
                 * flush it out and store this line separately.
                 */
                if (cont.len && cont.owner == current) {
-                       if (!prefix)
+                       if (!(lflags & LOG_PREFIX))
                                stored = cont_add(facility, level, text, text_len);
                        cont_flush();
                }
 
                if (!stored)
-                       log_store(facility, level, LOG_DEFAULT, 0,
+                       log_store(facility, level, lflags, 0,
                                  dict, dictlen, text, text_len);
        }
        printed_len += text_len;
@@ -1607,8 +1643,8 @@ static struct cont {
 static struct log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size) { return 0; }
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size) { return 0; }
 static size_t cont_print_text(char *text, size_t size) { return 0; }
 
 #endif /* CONFIG_PRINTK */
@@ -1884,6 +1920,7 @@ void wake_up_klogd(void)
 /* the next printk record to write to the console */
 static u64 console_seq;
 static u32 console_idx;
+static enum log_flags console_prev;
 
 /**
  * console_unlock - unlock the console system
@@ -1944,6 +1981,7 @@ again:
                        /* messages are gone, move to first one */
                        console_seq = log_first_seq;
                        console_idx = log_first_idx;
+                       console_prev = 0;
                }
 skip:
                if (console_seq == log_next_seq)
@@ -1957,14 +1995,21 @@ skip:
                         */
                        console_idx = log_next(console_idx);
                        console_seq++;
+                       /*
+                        * We will get here again when we register a new
+                        * CON_PRINTBUFFER console. Clear the flag so we
+                        * will properly dump everything later.
+                        */
+                       msg->flags &= ~LOG_NOCONS;
                        goto skip;
                }
 
                level = msg->level;
-               len = msg_print_text(msg, false, text, sizeof(text));
-
+               len = msg_print_text(msg, console_prev, false,
+                                    text, sizeof(text));
                console_idx = log_next(console_idx);
                console_seq++;
+               console_prev = msg->flags;
                raw_spin_unlock(&logbuf_lock);
 
                stop_critical_timings();        /* don't trace print latency */
@@ -2227,6 +2272,7 @@ void register_console(struct console *newcon)
                raw_spin_lock_irqsave(&logbuf_lock, flags);
                console_seq = syslog_seq;
                console_idx = syslog_idx;
+               console_prev = syslog_prev;
                raw_spin_unlock_irqrestore(&logbuf_lock, flags);
                /*
                 * We're about to replay the log buffer.  Only do this to the
@@ -2520,8 +2566,7 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
        }
 
        msg = log_from_idx(dumper->cur_idx);
-       l = msg_print_text(msg, syslog,
-                             line, size);
+       l = msg_print_text(msg, 0, syslog, line, size);
 
        dumper->cur_idx = log_next(dumper->cur_idx);
        dumper->cur_seq++;
@@ -2561,6 +2606,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
        u32 idx;
        u64 next_seq;
        u32 next_idx;
+       enum log_flags prev;
        size_t l = 0;
        bool ret = false;
 
@@ -2583,23 +2629,27 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
        /* calculate length of entire buffer */
        seq = dumper->cur_seq;
        idx = dumper->cur_idx;
+       prev = 0;
        while (seq < dumper->next_seq) {
                struct log *msg = log_from_idx(idx);
 
-               l += msg_print_text(msg, true, NULL, 0);
+               l += msg_print_text(msg, prev, true, NULL, 0);
                idx = log_next(idx);
                seq++;
+               prev = msg->flags;
        }
 
        /* move first record forward until length fits into the buffer */
        seq = dumper->cur_seq;
        idx = dumper->cur_idx;
+       prev = 0;
        while (l > size && seq < dumper->next_seq) {
                struct log *msg = log_from_idx(idx);
 
-               l -= msg_print_text(msg, true, NULL, 0);
+               l -= msg_print_text(msg, prev, true, NULL, 0);
                idx = log_next(idx);
                seq++;
+               prev = msg->flags;
        }
 
        /* last message in next iteration */
@@ -2607,14 +2657,14 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
        next_idx = idx;
 
        l = 0;
+       prev = 0;
        while (seq < dumper->next_seq) {
                struct log *msg = log_from_idx(idx);
 
-               l += msg_print_text(msg, syslog,
-                                   buf + l, size - l);
-
+               l += msg_print_text(msg, prev, syslog, buf + l, size - l);
                idx = log_next(idx);
                seq++;
+               prev = msg->flags;
        }
 
        dumper->next_seq = next_seq;
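
One detail worth calling out from the print_prefix() change above: the value inside the '<N>' syslog marker is now (facility << 3) | level rather than the bare level, which is why the length estimate grows by up to three extra digits. A standalone sketch of that encoding (not kernel code):

#include <stdio.h>

/* Syslog priority encoding used by print_prefix() above. */
static unsigned int syslog_prefix(unsigned int facility, unsigned int level)
{
        return (facility << 3) | level;
}

int main(void)
{
        char buf[16];
        int len;

        /* Kernel messages (facility 0) keep the familiar single-digit prefix. */
        len = sprintf(buf, "<%u>", syslog_prefix(0, 6));   /* KERN_INFO      */
        printf("%-6s len %d\n", buf, len);                 /* "<6>",  len 3  */

        /* Messages injected via /dev/kmsg carry a facility as well. */
        len = sprintf(buf, "<%u>", syslog_prefix(1, 6));   /* facility 1     */
        printf("%-6s len %d\n", buf, len);                 /* "<14>", len 4  */
        return 0;
}
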
index 38ecdda3f55f6b3e503cf12e447c1801ae3c2812..4b97bba7396e6194db4bf5b644ef8b5a1997c974 100644 (file)
@@ -201,6 +201,7 @@ void rcu_note_context_switch(int cpu)
 {
        trace_rcu_utilization("Start context switch");
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch(cpu);
        trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
index ea056495783e1a09c482802bd2e66d0c863c8a35..19b61ac1079f825702a7f17ae7d8dec88756c141 100644 (file)
@@ -444,6 +444,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
index 5271a020887e6ea515aa29c9f52f9f612d9056ac..3e4899459f3d9d7177ee67d41b8ea1121ddb9540 100644 (file)
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
        struct task_struct *t = current;
        unsigned long flags;
@@ -164,7 +164,7 @@ void rcu_preempt_note_context_switch(void)
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+               rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ void rcu_preempt_note_context_switch(void)
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
-       rcu_preempt_qs(smp_processor_id());
+       rcu_preempt_qs(cpu);
        local_irq_restore(flags);
 }
 
@@ -1001,6 +1001,14 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
index ab56a1764d4db8c6a5136cd3b636dc330648783b..e8cd2027abbd1e2e0ed6d432974816cef2845a42 100644 (file)
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .partial = partial,
                .flags = flags,
                .ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-       splice_shrink_spd(pipe, &spd);
-        return ret;
+       splice_shrink_spd(&spd);
+       return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,
index d5594a4268d4a5bb531bcf86dfc66fe359e7c956..468bdd44c1baeb914cfc93037b86691b839ccc46 100644 (file)
@@ -2081,7 +2081,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
        /* Here we just switch the register state and the stack. */
-       rcu_switch_from(prev);
        switch_to(prev, next, prev);
 
        barrier();
@@ -2161,11 +2160,73 @@ unsigned long this_cpu_load(void)
 }
 
 
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ *   nr_active = 0;
+ *   for_each_possible_cpu(cpu)
+ *     nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * Due to a number of reasons the above turns in the mess below:
+ *
+ *  - for_each_possible_cpu() is prohibitively expensive on machines with
+ *    a serious number of cpus; therefore we need to take a distributed approach
+ *    to calculating nr_active.
+ *
+ *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ *    So assuming nr_active := 0 when we start out (true by definition), we
+ *    can simply take per-cpu deltas and fold those into a global accumulate
+ *    to obtain the same result. See calc_load_fold_active().
+ *
+ *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ *    across the machine, we assume 10 ticks is sufficient time for every
+ *    cpu to have completed this task.
+ *
+ *    This places an upper bound on the IRQ-off latency of the machine. Then
+ *    again, being late doesn't lose the delta; it just wrecks the sample.
+ *
+ *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ *    this would add another cross-cpu cacheline miss and atomic operation
+ *    to the wakeup path. Instead we increment on whatever cpu the task ran
+ *    when it went into uninterruptible state and decrement on whatever cpu
+ *    did the wakeup. This means that only the sum of nr_uninterruptible over
+ *    all cpus yields the correct result.
+ *
+ *  This covers the NO_HZ=n code; for extra headaches, see the comment below.
+ */
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
 unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:     pointer to dest load array
+ * @offset:    offset to add
+ * @shift:     shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+       loads[0] = (avenrun[0] + offset) << shift;
+       loads[1] = (avenrun[1] + offset) << shift;
+       loads[2] = (avenrun[2] + offset) << shift;
+}
 
 static long calc_load_fold_active(struct rq *this_rq)
 {
@@ -2182,6 +2243,9 @@ static long calc_load_fold_active(struct rq *this_rq)
        return delta;
 }
 
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
@@ -2193,30 +2257,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 
 #ifdef CONFIG_NO_HZ
 /*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ *  - When we go NO_HZ idle during the window, we can negate our sample
+ *    contribution, causing under-accounting.
+ *
+ *    We avoid this by keeping two idle-delta counters and flipping them
+ *    when the window starts, thus separating old and new NO_HZ load.
+ *
+ *    The only trick is the slight shift in index flip for read vs write.
+ *
+ *        0s            5s            10s           15s
+ *          +10           +10           +10           +10
+ *        |-|-----------|-|-----------|-|-----------|-|
+ *    r:0 0 1           1 0           0 1           1 0
+ *    w:0 1 1           0 0           1 1           0 0
+ *
+ *    This ensures we'll fold the old idle contribution in this window while
+ *    accumulating the new one.
+ *
+ *  - When we wake up from NO_HZ idle during the window, we push up our
+ *    contribution, since we effectively move our sample point to a known
+ *    busy state.
+ *
+ *    This is solved by pushing the window forward, and thus skipping the
+ *    sample, for this cpu (effectively using the idle-delta for this cpu which
+ *    was in effect at the time the window opened). This also solves the issue
+ *    of having to deal with a cpu having been in NOHZ idle for multiple
+ *    LOAD_FREQ intervals.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
 
-void calc_load_account_idle(struct rq *this_rq)
+static inline int calc_load_write_idx(void)
 {
+       int idx = calc_load_idx;
+
+       /*
+        * See calc_global_nohz(), if we observe the new index, we also
+        * See calc_global_nohz(): if we observe the new index, we also
+        */
+       smp_rmb();
+
+       /*
+        * If the folding window started, make sure we start writing in the
+        * next idle-delta.
+        */
+       if (!time_before(jiffies, calc_load_update))
+               idx++;
+
+       return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
+{
+       return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+       struct rq *this_rq = this_rq();
        long delta;
 
+       /*
+        * We're going into NOHZ mode; if there's any pending delta, fold it
+        * into the pending idle delta.
+        */
        delta = calc_load_fold_active(this_rq);
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks_idle);
+       if (delta) {
+               int idx = calc_load_write_idx();
+               atomic_long_add(delta, &calc_load_idle[idx]);
+       }
 }
 
-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
 {
-       long delta = 0;
+       struct rq *this_rq = this_rq();
+
+       /*
+        * If we're still before the sample window, we're done.
+        */
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
 
        /*
-        * Its got a race, we don't care...
+        * We woke inside or after the sample window, which means we're already
+        * accounted through the nohz accounting, so skip the entire deal and
+        * sync up for the next window.
         */
-       if (atomic_long_read(&calc_load_tasks_idle))
-               delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+       this_rq->calc_load_update = calc_load_update;
+       if (time_before(jiffies, this_rq->calc_load_update + 10))
+               this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+       int idx = calc_load_read_idx();
+       long delta = 0;
+
+       if (atomic_long_read(&calc_load_idle[idx]))
+               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
 
        return delta;
 }
@@ -2302,66 +2454,39 @@ static void calc_global_nohz(void)
 {
        long delta, active, n;
 
-       /*
-        * If we crossed a calc_load_update boundary, make sure to fold
-        * any pending idle changes, the respective CPUs might have
-        * missed the tick driven calc_load_account_active() update
-        * due to NO_HZ.
-        */
-       delta = calc_load_fold_idle();
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       /*
-        * It could be the one fold was all it took, we done!
-        */
-       if (time_before(jiffies, calc_load_update + 10))
-               return;
-
-       /*
-        * Catch-up, fold however many we are behind still
-        */
-       delta = jiffies - calc_load_update - 10;
-       n = 1 + (delta / LOAD_FREQ);
+       if (!time_before(jiffies, calc_load_update + 10)) {
+               /*
+                * Catch up: fold however many periods we are still behind
+                */
+               delta = jiffies - calc_load_update - 10;
+               n = 1 + (delta / LOAD_FREQ);
 
-       active = atomic_long_read(&calc_load_tasks);
-       active = active > 0 ? active * FIXED_1 : 0;
+               active = atomic_long_read(&calc_load_tasks);
+               active = active > 0 ? active * FIXED_1 : 0;
 
-       avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-       avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-       avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-       calc_load_update += n * LOAD_FREQ;
-}
-#else
-void calc_load_account_idle(struct rq *this_rq)
-{
-}
+               calc_load_update += n * LOAD_FREQ;
+       }
 
-static inline long calc_load_fold_idle(void)
-{
-       return 0;
+       /*
+        * Flip the idle index...
+        *
+        * Make sure we first write the new time and then flip the index, so
+        * that calc_load_write_idx() will see the new time when it reads the
+        * new index; this avoids a double flip messing things up.
+        */
+       smp_wmb();
+       calc_load_idx++;
 }
+#else /* !CONFIG_NO_HZ */
 
-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
 
-/**
- * get_avenrun - get the load average array
- * @loads:     pointer to dest load array
- * @offset:    offset to add
- * @shift:     shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       loads[0] = (avenrun[0] + offset) << shift;
-       loads[1] = (avenrun[1] + offset) << shift;
-       loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2369,11 +2494,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  */
 void calc_global_load(unsigned long ticks)
 {
-       long active;
+       long active, delta;
 
        if (time_before(jiffies, calc_load_update + 10))
                return;
 
+       /*
+        * Fold the 'old' idle-delta to include all NO_HZ cpus.
+        */
+       delta = calc_load_fold_idle();
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
+
        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;
 
@@ -2384,12 +2516,7 @@ void calc_global_load(unsigned long ticks)
        calc_load_update += LOAD_FREQ;
 
        /*
-        * Account one period with whatever state we found before
-        * folding in the nohz state and ageing the entire idle period.
-        *
-        * This avoids loosing a sample when we go idle between 
-        * calc_load_account_active() (10 ticks ago) and now and thus
-        * under-accounting.
+        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
         */
        calc_global_nohz();
 }
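For reference, the avenrun[] update applied above is an 11-bit fixed-point exponential moving average evaluated once per LOAD_FREQ (5 s) interval. A stripped-down sketch of a single step, with the well-known constants from <linux/sched.h> reproduced for illustration (calc_load_step() is a made-up name, not the kernel helper):

#define FSHIFT   11                     /* bits of fixed-point precision */
#define FIXED_1  (1 << FSHIFT)          /* 1.0 in fixed point */
#define EXP_1    1884                   /* ~ FIXED_1 / exp(5 sec / 1 min) */

/* new = old * e + active * (1 - e), everything scaled by FIXED_1 */
static unsigned long calc_load_step(unsigned long load, unsigned long exp,
                                    unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}

/* e.g. a 1-minute load of 0.50 (1024) with two runnable tasks (active = 4096)
 * gives calc_load_step(1024, 1884, 4096) = 1270, i.e. roughly 0.62 */
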
@@ -2406,13 +2533,16 @@ static void calc_load_account_active(struct rq *this_rq)
                return;
 
        delta  = calc_load_fold_active(this_rq);
-       delta += calc_load_fold_idle();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
        this_rq->calc_load_update += LOAD_FREQ;
 }
 
+/*
+ * End of global load-average stuff
+ */
+
 /*
  * The exact cpuload at various idx values, calculated at every tick would be
  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
index b44d604b35d1a8b4164aa2ea4d94d00bf9063824..b6baf370cae973707c24389b30b38e43c76bb4d7 100644 (file)
@@ -25,7 +25,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
        schedstat_inc(rq, sched_goidle);
-       calc_load_account_idle(rq);
        return rq->idle;
 }
 
index 6d52cea7f33dc77496ec6db9e975cd2c0fd1c2b9..55844f24435a0ad5c1c50561a54424ce4b25992d 100644 (file)
@@ -942,8 +942,6 @@ static inline u64 sched_avg_period(void)
        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
 }
 
-void calc_load_account_idle(struct rq *this_rq);
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
index e0c8ffc50d7fc48bfaa73f43946638a47c947fdb..2d39a84cd8575e6295f666421cee01b41652b3b1 100644 (file)
@@ -1788,7 +1788,6 @@ SYSCALL_DEFINE1(umask, int, mask)
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 {
-       struct vm_area_struct *vma;
        struct file *exe_file;
        struct dentry *dentry;
        int err;
@@ -1816,13 +1815,17 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
        down_write(&mm->mmap_sem);
 
        /*
-        * Forbid mm->exe_file change if there are mapped other files.
+        * Forbid mm->exe_file change if the old file is still mapped.
         */
        err = -EBUSY;
-       for (vma = mm->mmap; vma; vma = vma->vm_next) {
-               if (vma->vm_file && !path_equal(&vma->vm_file->f_path,
-                                               &exe_file->f_path))
-                       goto exit_unlock;
+       if (mm->exe_file) {
+               struct vm_area_struct *vma;
+
+               for (vma = mm->mmap; vma; vma = vma->vm_next)
+                       if (vma->vm_file &&
+                           path_equal(&vma->vm_file->f_path,
+                                      &mm->exe_file->f_path))
+                               goto exit_unlock;
        }
 
        /*
@@ -1835,6 +1838,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
        if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
                goto exit_unlock;
 
+       err = 0;
        set_mm_exe_file(mm, exe_file);
 exit_unlock:
        up_write(&mm->mmap_sem);
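This path is reached from user space through prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, ...), the checkpoint/restore interface. A hedged sketch of a caller, assuming a CONFIG_CHECKPOINT_RESTORE kernel, CAP_SYS_RESOURCE, and headers that already carry the PR_SET_MM constants:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(int argc, char **argv)
{
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;
        /* fails with EBUSY while a different executable is still mapped */
        if (prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, fd, 0, 0))
                perror("PR_SET_MM_EXE_FILE");
        close(fd);
        return 0;
}
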
index 8699978339286b444d2d096dd3f0e9b6fba5c898..4a08472c3ca71399edc34ffafa9f5394f25597e7 100644 (file)
@@ -406,6 +406,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                 */
                if (!ts->tick_stopped) {
                        select_nohz_load_balancer(1);
+                       calc_load_enter_idle();
 
                        ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
@@ -597,6 +598,7 @@ void tick_nohz_idle_exit(void)
                account_idle_ticks(ticks);
 #endif
 
+       calc_load_exit_idle();
        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick
index 6f46a00a1e8a703118ab9d8be3905758f08185c8..269b1fe5f2ae2f7e6c0bb0fac8ce9a54e2d4d278 100644 (file)
@@ -70,6 +70,12 @@ struct timekeeper {
        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
        struct timespec raw_time;
 
+       /* Offset clock monotonic -> clock realtime */
+       ktime_t offs_real;
+
+       /* Offset clock monotonic -> clock boottime */
+       ktime_t offs_boot;
+
        /* Seqlock for all timekeeper values */
        seqlock_t lock;
 };
@@ -172,6 +178,14 @@ static inline s64 timekeeping_get_ns_raw(void)
        return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
+static void update_rt_offset(void)
+{
+       struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic;
+
+       set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+       timekeeper.offs_real = timespec_to_ktime(tmp);
+}
+
 /* must hold write on timekeeper.lock */
 static void timekeeping_update(bool clearntp)
 {
@@ -179,6 +193,7 @@ static void timekeeping_update(bool clearntp)
                timekeeper.ntp_error = 0;
                ntp_clear();
        }
+       update_rt_offset();
        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                         timekeeper.clock, timekeeper.mult);
 }
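update_rt_offset() merely caches the negated wall_to_monotonic value, i.e. the constant delta with realtime = monotonic + offs_real (and boottime = monotonic + offs_boot), so hrtimer code can convert clock bases without retaking the timekeeper lock. A small user-space illustration of that delta (approximate, since the two clock_gettime() calls are not atomic):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec mono, real;

        clock_gettime(CLOCK_MONOTONIC, &mono);
        clock_gettime(CLOCK_REALTIME, &real);

        /* roughly what the kernel keeps cached as timekeeper.offs_real */
        printf("offs_real ~= %ld seconds\n", (long)(real.tv_sec - mono.tv_sec));
        return 0;
}
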
@@ -604,6 +619,7 @@ void __init timekeeping_init(void)
        }
        set_normalized_timespec(&timekeeper.wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
+       update_rt_offset();
        timekeeper.total_sleep_time.tv_sec = 0;
        timekeeper.total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -612,6 +628,12 @@ void __init timekeeping_init(void)
 /* time in seconds when suspend began */
 static struct timespec timekeeping_suspend_time;
 
+static void update_sleep_time(struct timespec t)
+{
+       timekeeper.total_sleep_time = t;
+       timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
  * @delta: pointer to a timespec delta value
@@ -630,8 +652,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
        timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, *delta);
-       timekeeper.total_sleep_time = timespec_add(
-                                       timekeeper.total_sleep_time, *delta);
+       update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta));
 }
 
 
@@ -963,6 +984,8 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
                leap = second_overflow(timekeeper.xtime.tv_sec);
                timekeeper.xtime.tv_sec += leap;
                timekeeper.wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
        /* Accumulate raw time */
@@ -1079,6 +1102,8 @@ static void update_wall_time(void)
                leap = second_overflow(timekeeper.xtime.tv_sec);
                timekeeper.xtime.tv_sec += leap;
                timekeeper.wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
        timekeeping_update(false);
@@ -1246,6 +1271,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
        } while (read_seqretry(&timekeeper.lock, seq));
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
+ *
+ * Returns the current monotonic time and updates the offsets.
+ * Called from hrtimer_interrupt() or retrigger_next_event().
+ */
+ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
+{
+       ktime_t now;
+       unsigned int seq;
+       u64 secs, nsecs;
+
+       do {
+               seq = read_seqbegin(&timekeeper.lock);
+
+               secs = timekeeper.xtime.tv_sec;
+               nsecs = timekeeper.xtime.tv_nsec;
+               nsecs += timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
+
+               *offs_real = timekeeper.offs_real;
+               *offs_boot = timekeeper.offs_boot;
+       } while (read_seqretry(&timekeeper.lock, seq));
+
+       now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+       now = ktime_sub(now, *offs_real);
+       return now;
+}
+#endif
+
 /**
  * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
  */
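ktime_get_update_offsets() uses the standard seqlock read pattern: snapshot every field, then retry if a writer holding timekeeper.lock interleaved. A stripped-down kernel-style sketch of that shape, with an illustrative two-field snapshot instead of the real timekeeper state:

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(snap_lock);
static u64 snap_sec, snap_nsec;

/* writer side: everything updated under the same write lock */
static void snap_update(u64 sec, u64 nsec)
{
        write_seqlock(&snap_lock);
        snap_sec  = sec;
        snap_nsec = nsec;
        write_sequnlock(&snap_lock);
}

/* reader side: same retry loop as ktime_get_update_offsets() above */
static void snap_read(u64 *sec, u64 *nsec)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&snap_lock);
                *sec  = snap_sec;
                *nsec = snap_nsec;
        } while (read_seqretry(&snap_lock, seq));
}
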
index 1d0f6a8a0e5e83680c0df3b28836a5f6a2103a39..f765465bffe47968752e272308d7fb5b63093a65 100644 (file)
@@ -1075,6 +1075,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
        rb_init_page(bpage->page);
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+       INIT_LIST_HEAD(&cpu_buffer->new_pages);
 
        ret = rb_allocate_pages(cpu_buffer, nr_pages);
        if (ret < 0)
@@ -1346,10 +1347,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
                         * If something was added to this page, it was full
                         * since it is not the tail page. So we deduct the
                         * bytes consumed in ring buffer from here.
-                        * No need to update overruns, since this page is
-                        * deleted from ring buffer and its entries are
-                        * already accounted for.
+                        * Increment overrun to account for the lost events.
                         */
+                       local_add(page_entries, &cpu_buffer->overrun);
                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
                }
 
index 49249c28690dbb21b6914e2907b16af56e1094c9..a7fa0702be1cd5b269ed6f15e9624f4b02968b11 100644 (file)
@@ -3609,6 +3609,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3680,7 +3681,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        ret = splice_to_pipe(pipe, &spd);
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
@@ -4231,6 +4232,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4318,7 +4320,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
index 518aea714d21d9dea1c7d52b0cf6878482ca5d82..66ce414891330964aacb48dee8079138034cad3d 100644 (file)
@@ -78,7 +78,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);
 
 /* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;
 
 /* Global error count */
 static u32 error_count;
@@ -657,7 +657,7 @@ static int dma_debug_fs_init(void)
 
        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
-                       (u32 *)&global_disable);
+                       &global_disable);
        if (!global_disable_dent)
                goto out_err;
 
index ec4fcb7a56c8975492d656940906af6153136e51..73096630cb35ac4d04209251f5cfba77f0c765ed 100644 (file)
@@ -698,7 +698,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
 {
index 7ea259d82a998c0397e976216ac861576e638100..2f42d952853970b5dd1f95a149d8750e7d570357 100644 (file)
@@ -701,8 +701,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
+                       if (err == -ENOMEM) {
+                               ret = COMPACT_PARTIAL;
+                               goto out;
+                       }
                }
-
        }
 
 out:
index deff1b64a08c36ef4857590e9913717488953014..14d260fa0d17939a2279c244df91789cd30720e4 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/ksm.h>
 #include <linux/fs.h>
+#include <linux/file.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -204,14 +205,16 @@ static long madvise_remove(struct vm_area_struct *vma,
 {
        loff_t offset;
        int error;
+       struct file *f;
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
 
        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;
 
-       if (!vma->vm_file || !vma->vm_file->f_mapping
-               || !vma->vm_file->f_mapping->host) {
+       f = vma->vm_file;
+
+       if (!f || !f->f_mapping || !f->f_mapping->host) {
                        return -EINVAL;
        }
 
@@ -221,11 +224,18 @@ static long madvise_remove(struct vm_area_struct *vma,
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-       /* filesystem's fallocate may need to take i_mutex */
+       /*
+        * Filesystem's fallocate may need to take i_mutex.  We need to
+        * explicitly grab a reference because the vma (and hence the
+        * vma's reference to the file) can go away as soon as we drop
+        * mmap_sem.
+        */
+       get_file(f);
        up_read(&current->mm->mmap_sem);
-       error = do_fallocate(vma->vm_file,
+       error = do_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
+       fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
 }
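madvise_remove() backs madvise(MADV_REMOVE), i.e. hole punching on shmem/tmpfs-backed mappings via do_fallocate(). A minimal user-space sketch that exercises it (assumes the running kernel and backing filesystem support MADV_REMOVE; anonymous shared mappings are shmem-backed):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4 * 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        memset(p, 0xff, len);
        if (madvise(p, len, MADV_REMOVE))       /* punch a hole, drop the pages */
                perror("MADV_REMOVE");
        printf("byte after punch: %d\n", p[0]); /* the hole reads back as zero */
        return 0;
}
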
index d4382095f8bdcda1493feb7fc835a93432027f42..5cc6731b00ccd05ff2f5b610627a0a2848dc5544 100644 (file)
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                           MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_free(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_reserve(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
        type->total_size -= type->regions[r].size;
@@ -184,6 +160,18 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
        }
 }
 
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+                                       phys_addr_t *addr)
+{
+       if (memblock.reserved.regions == memblock_reserved_init_regions)
+               return 0;
+
+       *addr = __pa(memblock.reserved.regions);
+
+       return PAGE_ALIGN(sizeof(struct memblock_region) *
+                         memblock.reserved.max);
+}
+
 /**
  * memblock_double_array - double the size of the memblock regions array
  * @type: memblock type of the regions array being doubled
@@ -204,6 +192,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_size)
 {
        struct memblock_region *new_array, *old_array;
+       phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;
@@ -217,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
+       /*
+        * We need to allocate the new array aligned to PAGE_SIZE,
+        * so we can free it completely later.
+        */
+       old_alloc_size = PAGE_ALIGN(old_size);
+       new_alloc_size = PAGE_ALIGN(new_size);
 
        /* Retrieve the slab flag */
        if (type == &memblock.memory)
@@ -245,11 +240,11 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 
                addr = memblock_find_in_range(new_area_start + new_area_size,
                                                memblock.current_limit,
-                                               new_size, sizeof(phys_addr_t));
+                                               new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                        min(new_area_start, memblock.current_limit),
-                                       new_size, sizeof(phys_addr_t));
+                                       new_alloc_size, PAGE_SIZE);
 
                new_array = addr ? __va(addr) : 0;
        }
@@ -279,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
-               memblock_free(__pa(old_array), old_size);
+               memblock_free(__pa(old_array), old_alloc_size);
 
        /* Reserve the new array if that comes from the memblock.
         * Otherwise, we needn't do it
         */
        if (!use_slab)
-               BUG_ON(memblock_reserve(addr, new_size));
+               BUG_ON(memblock_reserve(addr, new_alloc_size));
 
        /* Update slab flag */
        *in_slab = use_slab;
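The new old_alloc_size/new_alloc_size variables exist only to round the array sizes up to whole pages, so the doubled array can later be handed back to the page allocator in one piece. A trivial illustration of the rounding, assuming 4 KiB pages (the EX_ names are examples, not kernel macros):

#define EX_PAGE_SIZE     4096UL
#define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

/* doubling a 5000-byte regions array:
 *   old_alloc_size = EX_PAGE_ALIGN(5000)  =  8192  (2 pages)
 *   new_alloc_size = EX_PAGE_ALIGN(10000) = 12288  (3 pages)
 */
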
index 0d7e3ec8e0f3cc997b5fa0b422f5fa39caffe413..427bb291dd0fdeeb6db93c86c66df6a6f92c9b28 100644 (file)
@@ -618,7 +618,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
-                       goto out;
+                       goto error;
                new_pgdat = 1;
        }
 
index d23415c001bc4c5847986c7659261ca335888382..405573010f99a8b0d877dd980e284979ebd2ec69 100644 (file)
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
                __free_pages_bootmem(pfn_to_page(i), 0);
 }
 
+static unsigned long __init __free_memory_core(phys_addr_t start,
+                                phys_addr_t end)
+{
+       unsigned long start_pfn = PFN_UP(start);
+       unsigned long end_pfn = min_t(unsigned long,
+                                     PFN_DOWN(end), max_low_pfn);
+
+       if (start_pfn > end_pfn)
+               return 0;
+
+       __free_pages_memory(start_pfn, end_pfn);
+
+       return end_pfn - start_pfn;
+}
+
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
        unsigned long count = 0;
-       phys_addr_t start, end;
+       phys_addr_t start, end, size;
        u64 i;
 
-       /* free reserved array temporarily so that it's treated as free area */
-       memblock_free_reserved_regions();
-
-       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
-               unsigned long start_pfn = PFN_UP(start);
-               unsigned long end_pfn = min_t(unsigned long,
-                                             PFN_DOWN(end), max_low_pfn);
-               if (start_pfn < end_pfn) {
-                       __free_pages_memory(start_pfn, end_pfn);
-                       count += end_pfn - start_pfn;
-               }
-       }
+       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+               count += __free_memory_core(start, end);
+
+       /* free the range used for the reserved array, if we allocated it */
+       size = get_allocated_memblock_reserved_regions_info(&start);
+       if (size)
+               count += __free_memory_core(start, start + size);
 
-       /* put region array back? */
-       memblock_reserve_reserved_regions();
        return count;
 }
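__free_memory_core() clips a byte range down to the whole pages it fully contains before releasing them. The arithmetic in a tiny standalone example, with PFN_UP()/PFN_DOWN() re-expressed for illustration and 4 KiB pages assumed:

#include <stdio.h>

#define EX_PAGE_SHIFT  12
#define EX_PAGE_SIZE   (1UL << EX_PAGE_SHIFT)
#define EX_PFN_UP(x)   (((x) + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT)
#define EX_PFN_DOWN(x) ((x) >> EX_PAGE_SHIFT)

int main(void)
{
        unsigned long start = 0x1234, end = 0x5000;

        /* only fully covered pages are freed: pfns 2, 3 and 4 here */
        printf("free pfns %lu..%lu, count %lu\n",
               EX_PFN_UP(start), EX_PFN_DOWN(end) - 1,
               EX_PFN_DOWN(end) - EX_PFN_UP(start));
        return 0;
}
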
 
@@ -274,7 +282,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
index a15a466d0d1d14b21bd82de2bfd3df31654420db..bd106361be4bf2d080e67f11252198c710745278 100644 (file)
@@ -263,6 +263,24 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
        return 0;
 }
 
+/*
+ * Sometimes, before we decide whether to proceed or to fail, we must check
+ * that an entry was not already brought back from swap by a racing thread.
+ *
+ * Checking the page is not enough: by the time a SwapCache page is locked, it
+ * might be reused, and again be SwapCache, using the same swap as before.
+ */
+static bool shmem_confirm_swap(struct address_space *mapping,
+                              pgoff_t index, swp_entry_t swap)
+{
+       void *item;
+
+       rcu_read_lock();
+       item = radix_tree_lookup(&mapping->page_tree, index);
+       rcu_read_unlock();
+       return item == swp_to_radix_entry(swap);
+}
+
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
@@ -270,40 +288,31 @@ static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, gfp_t gfp, void *expected)
 {
-       int error = 0;
+       int error;
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapBacked(page));
 
+       page_cache_get(page);
+       page->mapping = mapping;
+       page->index = index;
+
+       spin_lock_irq(&mapping->tree_lock);
        if (!expected)
-               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               error = radix_tree_insert(&mapping->page_tree, index, page);
+       else
+               error = shmem_radix_tree_replace(mapping, index, expected,
+                                                                page);
        if (!error) {
-               page_cache_get(page);
-               page->mapping = mapping;
-               page->index = index;
-
-               spin_lock_irq(&mapping->tree_lock);
-               if (!expected)
-                       error = radix_tree_insert(&mapping->page_tree,
-                                                       index, page);
-               else
-                       error = shmem_radix_tree_replace(mapping, index,
-                                                       expected, page);
-               if (!error) {
-                       mapping->nrpages++;
-                       __inc_zone_page_state(page, NR_FILE_PAGES);
-                       __inc_zone_page_state(page, NR_SHMEM);
-                       spin_unlock_irq(&mapping->tree_lock);
-               } else {
-                       page->mapping = NULL;
-                       spin_unlock_irq(&mapping->tree_lock);
-                       page_cache_release(page);
-               }
-               if (!expected)
-                       radix_tree_preload_end();
+               mapping->nrpages++;
+               __inc_zone_page_state(page, NR_FILE_PAGES);
+               __inc_zone_page_state(page, NR_SHMEM);
+               spin_unlock_irq(&mapping->tree_lock);
+       } else {
+               page->mapping = NULL;
+               spin_unlock_irq(&mapping->tree_lock);
+               page_cache_release(page);
        }
-       if (error)
-               mem_cgroup_uncharge_cache_page(page);
        return error;
 }
 
@@ -1124,9 +1133,9 @@ repeat:
                /* We have to do this with page locked to prevent races */
                lock_page(page);
                if (!PageSwapCache(page) || page_private(page) != swap.val ||
-                   page->mapping) {
+                   !shmem_confirm_swap(mapping, index, swap)) {
                        error = -EEXIST;        /* try again */
-                       goto failed;
+                       goto unlock;
                }
                if (!PageUptodate(page)) {
                        error = -EIO;
@@ -1142,9 +1151,12 @@ repeat:
 
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
+               if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
                                                gfp, swp_to_radix_entry(swap));
+                       /* We already confirmed the swap entry and make no allocation */
+                       VM_BUG_ON(error);
+               }
                if (error)
                        goto failed;
 
@@ -1181,11 +1193,18 @@ repeat:
                __set_page_locked(page);
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
-                       error = shmem_add_to_page_cache(page, mapping, index,
-                                               gfp, NULL);
                if (error)
                        goto decused;
+               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               if (!error) {
+                       error = shmem_add_to_page_cache(page, mapping, index,
+                                                       gfp, NULL);
+                       radix_tree_preload_end();
+               }
+               if (error) {
+                       mem_cgroup_uncharge_cache_page(page);
+                       goto decused;
+               }
                lru_cache_add_anon(page);
 
                spin_lock(&info->lock);
@@ -1245,14 +1264,10 @@ decused:
 unacct:
        shmem_unacct_blocks(info->flags, 1);
 failed:
-       if (swap.val && error != -EINVAL) {
-               struct page *test = find_get_page(mapping, index);
-               if (test && !radix_tree_exceptional_entry(test))
-                       page_cache_release(test);
-               /* Have another try if the entry has changed */
-               if (test != swp_to_radix_entry(swap))
-                       error = -EEXIST;
-       }
+       if (swap.val && error != -EINVAL &&
+           !shmem_confirm_swap(mapping, index, swap))
+               error = -EEXIST;
+unlock:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
@@ -1264,7 +1279,7 @@ failed:
                spin_unlock(&info->lock);
                goto repeat;
        }
-       if (error == -EEXIST)
+       if (error == -EEXIST)   /* from above or from radix_tree_insert */
                goto repeat;
        return error;
 }
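The reworked shmem_add_to_page_cache() pushes radix_tree_preload() out to the one caller that may actually allocate, following the usual radix-tree idiom: preallocate nodes outside the spinlock, insert under it. A compact kernel-style sketch of that idiom with illustrative names:

#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static int locked_insert(struct radix_tree_root *root, spinlock_t *lock,
                         unsigned long index, void *item, gfp_t gfp)
{
        int error = radix_tree_preload(gfp);    /* may sleep, disables preempt */

        if (error)
                return error;

        spin_lock_irq(lock);
        error = radix_tree_insert(root, index, item);   /* atomic context now */
        spin_unlock_irq(lock);

        radix_tree_preload_end();               /* re-enable preemption */
        return error;
}
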
@@ -1594,6 +1609,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1682,7 +1698,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
@@ -1691,98 +1707,6 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        return error;
 }
 
-/*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
- */
-static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
-                                   pgoff_t index, pgoff_t end, int origin)
-{
-       struct page *page;
-       struct pagevec pvec;
-       pgoff_t indices[PAGEVEC_SIZE];
-       bool done = false;
-       int i;
-
-       pagevec_init(&pvec, 0);
-       pvec.nr = 1;            /* start small: we may be there already */
-       while (!done) {
-               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-                                       pvec.nr, pvec.pages, indices);
-               if (!pvec.nr) {
-                       if (origin == SEEK_DATA)
-                               index = end;
-                       break;
-               }
-               for (i = 0; i < pvec.nr; i++, index++) {
-                       if (index < indices[i]) {
-                               if (origin == SEEK_HOLE) {
-                                       done = true;
-                                       break;
-                               }
-                               index = indices[i];
-                       }
-                       page = pvec.pages[i];
-                       if (page && !radix_tree_exceptional_entry(page)) {
-                               if (!PageUptodate(page))
-                                       page = NULL;
-                       }
-                       if (index >= end ||
-                           (page && origin == SEEK_DATA) ||
-                           (!page && origin == SEEK_HOLE)) {
-                               done = true;
-                               break;
-                       }
-               }
-               shmem_deswap_pagevec(&pvec);
-               pagevec_release(&pvec);
-               pvec.nr = PAGEVEC_SIZE;
-               cond_resched();
-       }
-       return index;
-}
-
-static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
-{
-       struct address_space *mapping;
-       struct inode *inode;
-       pgoff_t start, end;
-       loff_t new_offset;
-
-       if (origin != SEEK_DATA && origin != SEEK_HOLE)
-               return generic_file_llseek_size(file, offset, origin,
-                                                       MAX_LFS_FILESIZE);
-       mapping = file->f_mapping;
-       inode = mapping->host;
-       mutex_lock(&inode->i_mutex);
-       /* We're holding i_mutex so we can access i_size directly */
-
-       if (offset < 0)
-               offset = -EINVAL;
-       else if (offset >= inode->i_size)
-               offset = -ENXIO;
-       else {
-               start = offset >> PAGE_CACHE_SHIFT;
-               end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               new_offset = shmem_seek_hole_data(mapping, start, end, origin);
-               new_offset <<= PAGE_CACHE_SHIFT;
-               if (new_offset > offset) {
-                       if (new_offset < inode->i_size)
-                               offset = new_offset;
-                       else if (origin == SEEK_DATA)
-                               offset = -ENXIO;
-                       else
-                               offset = inode->i_size;
-               }
-       }
-
-       if (offset >= 0 && offset != file->f_pos) {
-               file->f_pos = offset;
-               file->f_version = 0;
-       }
-       mutex_unlock(&inode->i_mutex);
-       return offset;
-}
-
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                                         loff_t len)
 {
@@ -2786,7 +2710,7 @@ static const struct address_space_operations shmem_aops = {
 static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
-       .llseek         = shmem_file_llseek,
+       .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
index 6a4bf9160e855ae1e2d61fefb4922918f710bb24..c7bb952400c83c9114a401f647955cb18dd2ea44 100644 (file)
@@ -275,8 +275,9 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
 {
-       pg_data_t *host_pgdat;
-       unsigned long goal;
+       unsigned long goal, limit;
+       unsigned long *p;
+       int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
@@ -287,10 +288,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       goal = __pa(pgdat) & PAGE_SECTION_MASK;
-       host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
-       return __alloc_bootmem_node_nopanic(host_pgdat, size,
-                                           SMP_CACHE_BYTES, goal);
+       goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+       limit = goal + (1UL << PA_SECTION_SHIFT);
+       nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+       p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+                                         SMP_CACHE_BYTES, goal, limit);
+       if (!p && limit) {
+               limit = 0;
+               goto again;
+       }
+       return p;
 }
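The goal/limit pair confines the usemap allocation to the memory section that holds the pgdat, falling back to an unconstrained allocation only if that section is full. With 128 MiB sections (SECTION_SIZE_BITS = 27 as on x86_64; the numbers are purely illustrative):

/*   __pa(pgdat) = 0x1_2345_6789
 *   goal  = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT) = 0x1_2000_0000
 *   limit = goal + (1UL << PA_SECTION_SHIFT)                = 0x1_2800_0000
 *
 * so the first ___alloc_bootmem_node_nopanic() attempt is restricted to the
 * pgdat's own 128 MiB section; if it fails, limit is cleared and the
 * allocation may land anywhere.
 */
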
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
index eeb3bc9d1d361b6f20821073485f1b8e7c4931d3..661576324c7f52ead9d5eaf6b3619498c1047336 100644 (file)
@@ -2955,14 +2955,17 @@ int kswapd_run(int nid)
 }
 
 /*
- * Called by memory hotplug when all memory in a node is offlined.
+ * Called by memory hotplug when all memory in a node is offlined.  Caller must
+ * hold lock_memory_hotplug().
  */
 void kswapd_stop(int nid)
 {
        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
 
-       if (kswapd)
+       if (kswapd) {
                kthread_stop(kswapd);
+               NODE_DATA(nid)->kswapd = NULL;
+       }
 }
 
 static int __init kswapd_init(void)
index 6df214041a5e5bfe85b948b41ce56e15af514860..84f01ba81a34655becfd3be7af5fed76251d21ca 100644 (file)
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
                no_module = request_module("netdev-%s", name);
        if (no_module && capable(CAP_SYS_MODULE)) {
                if (!request_module("%s", name))
-                       pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-                              name);
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
        }
 }
 EXPORT_SYMBOL(dev_load);
index d78671e9d545be838f9ab5140c9ee07fb1309c26..46a3d23d259e51c17140798554874e929145d6bb 100644 (file)
@@ -1755,6 +1755,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
index 66e4fcdd1c6b4a6a07a7d26f3a66bc7e43183807..a4bb856de08f528ae564ab68fd2e83e6f779075f 100644 (file)
@@ -1342,7 +1342,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
-       u8 bssid[ETH_ALEN];
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -1354,10 +1353,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_stop_poll(sdata);
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
        ifmgd->associated = NULL;
-       memset(ifmgd->bssid, 0, ETH_ALEN);
 
        /*
         * we need to commit the associated = NULL change because the
@@ -1377,7 +1373,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        netif_carrier_off(sdata->dev);
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, bssid);
+       sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
                ieee80211_sta_tear_down_BA_sessions(sta, tx);
@@ -1386,13 +1382,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* deauthenticate/disassociate now */
        if (tx || frame_buf)
-               ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
-                                              tx, frame_buf);
+               ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
+                                              reason, tx, frame_buf);
 
        /* flush out frame */
        if (tx)
                drv_flush(local, false);
 
+       /* clear bssid only after building the needed mgmt frames */
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+
        /* remove AP and TDLS peers */
        sta_info_flush(local, sdata);
 
index 7bcecf73aafbf9dadb87a2c8a114e3eb92490131..965e6ec0adb6c12393ced183a44463a63e09a9ff 100644 (file)
@@ -2455,7 +2455,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
         * frames that we didn't handle, including returning unknown
         * ones. For all other modes we will return them to the sender,
         * setting the 0x80 bit in the action category, as required by
-        * 802.11-2007 7.3.1.11.
+        * 802.11-2012 9.24.4.
         * Newer versions of hostapd shall also use the management frame
         * registration mechanisms, but older ones still use cooked
         * monitor interfaces so push all frames there.
@@ -2465,6 +2465,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
                return RX_DROP_MONITOR;
 
+       if (is_multicast_ether_addr(mgmt->da))
+               return RX_DROP_MONITOR;
+
        /* do not return rejected action frames */
        if (mgmt->u.action.category & 0x80)
                return RX_DROP_UNUSABLE;
index 819c342f5b3012b6a4e37f60414fb835fc0041d8..9730882697aaedbab0beee66f0f12a654b97b63a 100644 (file)
@@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
        return 0;
 }
 
+static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       return -EOPNOTSUPP;
+}
+
 static int
 ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
@@ -1539,6 +1547,10 @@ nlmsg_failure:
 }
 
 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_NONE]        = {
+               .call           = ip_set_none,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+       },
        [IPSET_CMD_CREATE]      = {
                .call           = ip_set_create,
                .attr_count     = IPSET_ATTR_CMD_MAX,
index ee863943c8267286e4b16e52400dd40bf51b4f26..d5d3607ae7bcf5e9704bd189217115cf048aee4b 100644 (file)
@@ -38,30 +38,6 @@ struct iface_node {
 
 #define iface_data(n)  (rb_entry(n, struct iface_node, node)->iface)
 
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
-       const long *a = (const long *)_a;
-       const long *b = (const long *)_b;
-
-       BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
-       if (a[0] != b[0])
-               return a[0] - b[0];
-       if (IFNAMSIZ > sizeof(long)) {
-               if (a[1] != b[1])
-                       return a[1] - b[1];
-       }
-       if (IFNAMSIZ > 2 * sizeof(long)) {
-               if (a[2] != b[2])
-                       return a[2] - b[2];
-       }
-       if (IFNAMSIZ > 3 * sizeof(long)) {
-               if (a[3] != b[3])
-                       return a[3] - b[3];
-       }
-       return 0;
-}
-
 static void
 rbtree_destroy(struct rb_root *root)
 {
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
 
        while (n) {
                const char *d = iface_data(n);
-               long res = ifname_compare(*iface, d);
+               int res = strcmp(*iface, d);
 
                if (res < 0)
                        n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
 
        while (*n) {
                char *ifname = iface_data(*n);
-               long res = ifname_compare(*iface, ifname);
+               int res = strcmp(*iface, ifname);
 
                p = *n;
                if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface4_elem data = { .cidr = HOST_MASK };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = { .cidr = HOST_MASK };
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
index dd811b8dd97c66a9da387a4e37073c47e8dc4407..d43e3c122f7bdc5b042b92ce74981149922faa78 100644 (file)
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
-                                   const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+                                    const struct in6_addr *addr)
 {
-       struct rt6_info *rt;
        struct flowi6 fl6 = {
                .daddr = *addr,
        };
+       struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+       bool is_local;
 
-       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-       if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
-               return 1;
+       is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
 
-       return 0;
+       dst_release(dst);
+       return is_local;
 }
 #endif
 
index 3e797d1fcb94272ad96b9798e91a6e6467031379..791d56bbd74a7613e4b1d87ba74bcf2a8a135e46 100644 (file)
@@ -169,8 +169,10 @@ replay:
 
                err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                attr, attrlen, ss->cb[cb_id].policy);
-               if (err < 0)
+               if (err < 0) {
+                       rcu_read_unlock();
                        return err;
+               }
 
                if (nc->call_rcu) {
                        err = nc->call_rcu(net->nfnl, skb, nlh,
index cb2646179e5f8b8d1836499579d1ccfd109fad60..2ab196a9f228ac873238c2a060685bacea62b87a 100644 (file)
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
-       nfca_poll->nfcid1_len = *data++;
+       nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
 
        pr_debug("sens_res 0x%x, nfcid1_len %d\n",
                 nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
                        struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
                                                     __u8 *data)
 {
-       nfcb_poll->sensb_res_len = *data++;
+       nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
 
        pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
 
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
                                                     __u8 *data)
 {
        nfcf_poll->bit_rate = *data++;
-       nfcf_poll->sensf_res_len = *data++;
+       nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
 
        pr_debug("bit_rate %d, sensf_res_len %d\n",
                 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -331,7 +331,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        switch (ntf->activation_rf_tech_and_mode) {
        case NCI_NFC_A_PASSIVE_POLL_MODE:
                nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
-               nfca_poll->rats_res_len = *data++;
+               nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
                pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
                if (nfca_poll->rats_res_len > 0) {
                        memcpy(nfca_poll->rats_res,
@@ -341,7 +341,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
 
        case NCI_NFC_B_PASSIVE_POLL_MODE:
                nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
-               nfcb_poll->attrib_res_len = *data++;
+               nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
                pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
                if (nfcb_poll->attrib_res_len > 0) {
                        memcpy(nfcb_poll->attrib_res,
index ec1134c9e07fcd34fc5d6116e1a4aef9dedf6f23..8b8a6a2b2badaf61e9c71a174809ca989438668f 100644 (file)
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       pr_debug("sock=%p\n", sock);
+       pr_debug("sock=%p sk=%p\n", sock, sk);
+
+       if (!sk)
+               return 0;
 
        sock_orphan(sk);
        sock_put(sk);
index 5bc9ab161b373eadf51843ebaa77c527716a2122..b16517ee1aaf7cfad94978ed1181df6432da6f07 100644 (file)
@@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_cnt = 0;
+       asoc->peer.sack_generation = 1;
 
        /* Assume that the peer will tell us if he recognizes ASCONF
         * as part of INIT exchange.
index f1b7d4bb591e9b648865c4e096ef838e77678442..6ae47acaaec65ceb898e10e462454e667a8e739e 100644 (file)
@@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
                /* If the SACK timer is running, we have a pending SACK */
                if (timer_pending(timer)) {
                        struct sctp_chunk *sack;
+
+                       if (pkt->transport->sack_generation !=
+                           pkt->transport->asoc->peer.sack_generation)
+                               return retval;
+
                        asoc->a_rwnd = asoc->rwnd;
                        sack = sctp_make_sack(asoc);
                        if (sack) {
index a85eeeb55dd0022e009895c53a6d8593929b7691..b6de71efb140c538a66e0a7901df2b4402a85bf4 100644 (file)
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
        int len;
        __u32 ctsn;
        __u16 num_gabs, num_dup_tsns;
+       struct sctp_association *aptr = (struct sctp_association *)asoc;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+       struct sctp_transport *trans;
 
        memset(gabs, 0, sizeof(gabs));
        ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
                sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
                                 sctp_tsnmap_get_dups(map));
 
+       /* Once we have generated a sack, check what our sack generation
+        * is; if it is 0, reset the transports to 0 and reset the
+        * association generation to 1.
+        *
+        * The idea is that zero is never used as a valid generation for
+        * the association, so no transport will match after a wrap event
+        * like this until the next sack.
+        */
+       if (++aptr->peer.sack_generation == 0) {
+               list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+                                   transports)
+                       trans->sack_generation = 0;
+               aptr->peer.sack_generation = 1;
+       }
 nodata:
        return retval;
 }
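The wrap handling relies on 0 never being a valid association generation: after the increment wraps to 0, every transport copy is forced to 0 and the association restarts at 1, so no stale transport compares equal until it is re-stamped. A self-contained sketch of that convention (illustrative structures, not the sctp ones):

#include <stdbool.h>
#include <stdint.h>

struct ex_assoc { uint32_t sack_generation; };
struct ex_trans { uint32_t sack_generation; };

/* true if this transport already carries the current SACK */
static bool ex_sack_bundled(const struct ex_assoc *a, const struct ex_trans *t)
{
        return t->sack_generation == a->sack_generation;
}

/* called whenever a new SACK has been built */
static void ex_sack_generated(struct ex_assoc *a, struct ex_trans *t, int n)
{
        if (++a->sack_generation == 0) {                /* counter wrapped */
                int i;

                for (i = 0; i < n; i++)
                        t[i].sack_generation = 0;       /* never matches... */
                a->sack_generation = 1;                 /* ...until re-stamped */
        }
}
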
index c96d1a81cf4209f6d3ca9b6762fefa47e75867c3..8716da1a859221dc96d206ed517190e9e9cfa2ca 100644 (file)
@@ -1268,7 +1268,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_REPORT_TSN:
                        /* Record the arrival of a TSN.  */
                        error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                                                cmd->obj.u32);
+                                                cmd->obj.u32, NULL);
                        break;
 
                case SCTP_CMD_REPORT_FWDTSN:
index b026ba0c69922e09eb5c150298f650ea7bb3d1c4..1dcceb6e0ce6c2b9e5f6c0c3abf8075c3e75224c 100644 (file)
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        memset(&peer->saddr, 0, sizeof(union sctp_addr));
 
+       peer->sack_generation = 0;
+
        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
index f1e40cebc981ae7962bb9cd20ae84823b70d43cf..b5fb7c409023ff8adea81d41e68ace60781febae 100644 (file)
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
 
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+                    struct sctp_transport *trans)
 {
        u16 gap;
 
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
                 */
                map->max_tsn_seen++;
                map->cumulative_tsn_ack_point++;
+               if (trans)
+                       trans->sack_generation =
+                               trans->asoc->peer.sack_generation;
                map->base_tsn++;
        } else {
                /* Either we already have a gap, or about to record a gap, so
index 8a84017834c211a840e83c1e39bebdbb001ec5c4..33d894776192205cd4b4a9573ccf70664c723b2d 100644 (file)
@@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * can mark it as received so the tsn_map is updated correctly.
         */
        if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                            ntohl(chunk->subh.data_hdr->tsn)))
+                            ntohl(chunk->subh.data_hdr->tsn),
+                            chunk->transport))
                goto fail_mark;
 
        /* First calculate the padding, so we don't inadvertently
index f2d1de7f2ffbd5a219759f55bdc546f2edbf19b0..f5a6a4f4faf721af4874538093cb003f4efc202c 100644 (file)
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);
 
                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
index 3efc9b12aef44016201b02eeafcc10140f17a240..860aeb349cb337bbccf4346d99120d4d1fd51c90 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mman.h>
 #include <linux/mount.h>
 #include <linux/personality.h>
+#include <linux/backing-dev.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR      2
index 5ccf10a4d5932b187bba679e40e8f424d4739551..aa4c25e0f3277fce38520c72c9759bfc90b9022d 100644 (file)
@@ -6688,6 +6688,31 @@ static const struct alc_model_fixup alc662_fixup_models[] = {
        {}
 };
 
+static void alc662_fill_coef(struct hda_codec *codec)
+{
+       int val, coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030) {
+                       val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
+                       alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
+               }
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
+               break;
+       }
+}
 
 /*
  */
@@ -6707,6 +6732,9 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
+       spec->init_hook = alc662_fill_coef;
+       alc662_fill_coef(codec);
+
        alc_pick_fixup(codec, alc662_fixup_models,
                       alc662_fixup_tbl, alc662_fixups);
        alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
index 64d2a4fa34b27d81ad84d0e64ed625e04e293b7a..e9b62b5ea637580ea7e8904ec2c68662ed1471fc 100644 (file)
@@ -935,9 +935,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
        }
 
 found:
-       data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
-       snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
-                     data | (pll_p << PLLP_SHIFT));
+       snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
        snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
                      pll_r << PLLR_SHIFT);
        snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
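The aic3x change replaces an open-coded read/OR/write with snd_soc_update_bits(), which touches only the bits selected by the mask. A sketch of the underlying read-modify-write (not the ASoC implementation itself; ex_update_bits() is an illustrative name):

/* keep everything outside 'mask' intact, replace the field inside it */
static unsigned int ex_update_bits(unsigned int old, unsigned int mask,
                                   unsigned int val)
{
        return (old & ~mask) | (val & mask);
}

/* e.g. old = 0xd2, PLLP_MASK = 0x7, pll_p = 4  ->  ex_update_bits() = 0xd4 */
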
index 6f097fb60683e7b5605b38e373c7f1c75f4158c9..08c7f6685ff0937a367824c7a731fd4fb8e48d72 100644 (file)
 
 /* PLL registers bitfields */
 #define PLLP_SHIFT             0
+#define PLLP_MASK              7
 #define PLLQ_SHIFT             3
 #define PLLR_SHIFT             0
 #define PLLJ_SHIFT             2
index acbdc5fde9236be0e43a5767bd0135eb92450e68..32682c1b7cdece413693db6f6487ae4df640da09 100644 (file)
@@ -1491,6 +1491,7 @@ static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
 
 static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
        5644800,
+       3763200,
        2882400,
        1881600,
        1411200,
index e6906901debbc2aaa71c40621fbd4241d8d15663..0f647d22cb4ac7bccffe25311011255cf0b33104 100644 (file)
@@ -414,7 +414,7 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
 {
        struct list_head *p;
        struct snd_usb_endpoint *ep;
-       int ret, is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
+       int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
 
        mutex_lock(&chip->mutex);
 
@@ -434,16 +434,6 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
                    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
                    ep_num);
 
-       /* select the alt setting once so the endpoints become valid */
-       ret = usb_set_interface(chip->dev, alts->desc.bInterfaceNumber,
-                               alts->desc.bAlternateSetting);
-       if (ret < 0) {
-               snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                       __func__, ret);
-               ep = NULL;
-               goto __exit_unlock;
-       }
-
        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                goto __exit_unlock;
@@ -831,9 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
        if (++ep->use_count != 1)
                return 0;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return -EINVAL;
-
        /* just to be sure */
        deactivate_urbs(ep, 0, 1);
        wait_clear_urbs(ep);
@@ -911,9 +898,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        if (snd_BUG_ON(ep->use_count == 0))
                return;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return;
-
        if (--ep->use_count == 0) {
                deactivate_urbs(ep, force, can_sleep);
                ep->data_subs = NULL;
@@ -926,42 +910,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        }
 }
 
-/**
- * snd_usb_endpoint_activate: activate an snd_usb_endpoint
- *
- * @ep: the endpoint to activate
- *
- * If the endpoint is not currently in use, this functions will select the
- * correct alternate interface setting for the interface of this endpoint.
- *
- * In case of any active users, this functions does nothing.
- *
- * Returns an error if usb_set_interface() failed, 0 in all other
- * cases.
- */
-int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep)
-{
-       if (ep->use_count != 0)
-               return 0;
-
-       if (!ep->chip->shutdown &&
-           !test_and_set_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, ep->alt_idx);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s() usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
-                       return ret;
-               }
-
-               return 0;
-       }
-
-       return -EBUSY;
-}
-
 /**
  * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
  *
@@ -980,24 +928,15 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
        if (!ep)
                return -EINVAL;
 
+       deactivate_urbs(ep, 1, 1);
+       wait_clear_urbs(ep);
+
        if (ep->use_count != 0)
                return 0;
 
-       if (!ep->chip->shutdown &&
-           test_and_clear_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, 0);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       return ret;
-               }
+       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
 
-               return 0;
-       }
-
-       return -EBUSY;
+       return 0;
 }
 
 /**
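
The endpoint.c hunks above drop the per-endpoint activate path and its usb_set_interface() calls; start/stop are now gated purely by the endpoint use count, and deactivation always kills and drains URBs before clearing EP_FLAG_ACTIVATED once no users remain. A small sketch of that use-count gating (plain C, no locking -- the driver serializes callers elsewhere), where real work only happens on the 0 -> 1 and 1 -> 0 transitions:

    #include <stdio.h>

    struct endpoint {
            int use_count;
    };

    static void ep_start(struct endpoint *ep)
    {
            if (++ep->use_count != 1)
                    return;                 /* already running */
            printf("submit URBs\n");
    }

    static void ep_stop(struct endpoint *ep)
    {
            if (ep->use_count == 0)
                    return;                 /* unbalanced stop */
            if (--ep->use_count == 0)
                    printf("kill and drain URBs\n");
    }

    int main(void)
    {
            struct endpoint ep = { 0 };

            ep_start(&ep);
            ep_start(&ep);  /* second user: no new submission */
            ep_stop(&ep);
            ep_stop(&ep);   /* last user: tear down */
            return 0;
    }
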
index 54607f8c4f66ca91e5502808aa9fc94ceebc4d76..a1298f379428280045bf661f89e5fa59bb0acb48 100644 (file)
@@ -261,19 +261,6 @@ static void stop_endpoints(struct snd_usb_substream *subs,
                                      force, can_sleep, wait);
 }
 
-static int activate_endpoints(struct snd_usb_substream *subs)
-{
-       if (subs->sync_endpoint) {
-               int ret;
-
-               ret = snd_usb_endpoint_activate(subs->sync_endpoint);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return snd_usb_endpoint_activate(subs->data_endpoint);
-}
-
 static int deactivate_endpoints(struct snd_usb_substream *subs)
 {
        int reta, retb;
@@ -314,6 +301,33 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        if (fmt == subs->cur_audiofmt)
                return 0;
 
+       /* close the old interface */
+       if (subs->interface >= 0 && subs->interface != fmt->iface) {
+               err = usb_set_interface(subs->dev, subs->interface, 0);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed (%d)\n",
+                               dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               subs->interface = -1;
+               subs->altset_idx = 0;
+       }
+
+       /* set interface */
+       if (subs->interface != fmt->iface ||
+           subs->altset_idx != fmt->altset_idx) {
+               err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed (%d)\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               snd_printdd(KERN_INFO "setting usb interface %d:%d\n",
+                               fmt->iface, fmt->altsetting);
+               subs->interface = fmt->iface;
+               subs->altset_idx = fmt->altset_idx;
+       }
+
        subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip,
                                                   alts, fmt->endpoint, subs->direction,
                                                   SND_USB_ENDPOINT_TYPE_DATA);
@@ -387,7 +401,7 @@ add_sync_ep:
                subs->data_endpoint->sync_master = subs->sync_endpoint;
        }
 
-       if ((err = snd_usb_init_pitch(subs->stream->chip, subs->interface, alts, fmt)) < 0)
+       if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0)
                return err;
 
        subs->cur_audiofmt = fmt;
@@ -450,7 +464,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                struct usb_interface *iface;
                iface = usb_ifnum_to_if(subs->dev, fmt->iface);
                alts = &iface->altsetting[fmt->altset_idx];
-               ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+               ret = snd_usb_init_sample_rate(subs->stream->chip, fmt->iface, alts, fmt, rate);
                if (ret < 0)
                        return ret;
                subs->cur_rate = rate;
@@ -460,12 +474,6 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                mutex_lock(&subs->stream->chip->shutdown_mutex);
                /* format changed */
                stop_endpoints(subs, 0, 0, 0);
-               deactivate_endpoints(subs);
-
-               ret = activate_endpoints(subs);
-               if (ret < 0)
-                       goto unlock;
-
                ret = snd_usb_endpoint_set_params(subs->data_endpoint, hw_params, fmt,
                                                  subs->sync_endpoint);
                if (ret < 0)
@@ -500,6 +508,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        subs->period_bytes = 0;
        mutex_lock(&subs->stream->chip->shutdown_mutex);
        stop_endpoints(subs, 0, 1, 1);
+       deactivate_endpoints(subs);
        mutex_unlock(&subs->stream->chip->shutdown_mutex);
        return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
@@ -938,16 +947,20 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
 
 static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
 {
-       int ret;
        struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
        struct snd_usb_substream *subs = &as->substream[direction];
 
        stop_endpoints(subs, 0, 0, 0);
-       ret = deactivate_endpoints(subs);
+
+       if (!as->chip->shutdown && subs->interface >= 0) {
+               usb_set_interface(subs->dev, subs->interface, 0);
+               subs->interface = -1;
+       }
+
        subs->pcm_substream = NULL;
        snd_usb_autosuspend(subs->stream->chip);
 
-       return ret;
+       return 0;
 }
 
 /* Since a URB can handle only a single linear buffer, we must use double
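
In the pcm.c hunks above, altsetting selection moves into set_format(): if a different interface is currently open it is first returned to altsetting 0, then the wanted interface/altsetting is selected and recorded in subs->interface / subs->altset_idx, and close() likewise falls back to altsetting 0. A sketch of that close-old-then-open-new bookkeeping, with a hypothetical set_interface() standing in for usb_set_interface() and the altsetting index/number distinction glossed over:

    #include <stdio.h>

    static int set_interface(int iface, int alt)
    {
            printf("iface %d -> alt %d\n", iface, alt);
            return 0;                       /* pretend it always succeeds */
    }

    struct substream {
            int interface;                  /* -1 = none selected */
            int altset_idx;
    };

    static int switch_format(struct substream *s, int iface, int alt)
    {
            /* close the old interface first */
            if (s->interface >= 0 && s->interface != iface) {
                    if (set_interface(s->interface, 0) < 0)
                            return -1;
                    s->interface = -1;
                    s->altset_idx = 0;
            }

            /* select and record the new one */
            if (s->interface != iface || s->altset_idx != alt) {
                    if (set_interface(iface, alt) < 0)
                            return -1;
                    s->interface = iface;
                    s->altset_idx = alt;
            }
            return 0;
    }

    int main(void)
    {
            struct substream s = { -1, 0 };

            switch_format(&s, 2, 1);        /* open iface 2, alt 1 */
            switch_format(&s, 3, 2);        /* 2 -> alt 0, then 3 -> alt 2 */
            return 0;
    }
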
index 35ae56864e4f59625369941886b196438f107c99..a1f4e3669142630aa641d1a34b0f53673b6b67b0 100644 (file)
@@ -669,25 +669,26 @@ struct machine *machines__find(struct rb_root *self, pid_t pid)
 struct machine *machines__findnew(struct rb_root *self, pid_t pid)
 {
        char path[PATH_MAX];
-       const char *root_dir;
+       const char *root_dir = "";
        struct machine *machine = machines__find(self, pid);
 
-       if (!machine || machine->pid != pid) {
-               if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
-                       root_dir = "";
-               else {
-                       if (!symbol_conf.guestmount)
-                               goto out;
-                       sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
-                       if (access(path, R_OK)) {
-                               pr_err("Can't access file %s\n", path);
-                               goto out;
-                       }
-                       root_dir = path;
+       if (machine && (machine->pid == pid))
+               goto out;
+
+       if ((pid != HOST_KERNEL_ID) &&
+           (pid != DEFAULT_GUEST_KERNEL_ID) &&
+           (symbol_conf.guestmount)) {
+               sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+               if (access(path, R_OK)) {
+                       pr_err("Can't access file %s\n", path);
+                       machine = NULL;
+                       goto out;
                }
-               machine = machines__add(self, pid, root_dir);
+               root_dir = path;
        }
 
+       machine = machines__add(self, pid, root_dir);
+
 out:
        return machine;
 }
index c3e399bcf18deea25f4db0aafa69278b5b4cb570..56142d0fb8d79bfdc1bd21cd7ac24f0ded4627bf 100644 (file)
@@ -926,7 +926,7 @@ static struct machine *
                else
                        pid = event->ip.pid;
 
-               return perf_session__find_machine(session, pid);
+               return perf_session__findnew_machine(session, pid);
        }
 
        return perf_session__find_host_machine(session);
index df2fddbf0cd2f46d376d691786295526fa1deab0..5dd3b5ec8411191c41d76d19345496b3923f9a6f 100644 (file)
@@ -198,9 +198,8 @@ void print_trace_event(int cpu, void *data, int size)
        record.data = data;
 
        trace_seq_init(&s);
-       pevent_print_event(pevent, &s, &record);
+       pevent_event_info(&s, event, &record);
        trace_seq_do_printf(&s);
-       printf("\n");
 }
 
 void print_event(int cpu, void *data, int size, unsigned long long nsecs,
index b1e091ae2f377bcecd5fadc417776081d88aba53..23a41a9f8db999f4b70fd4c0cd4b6ae8b26ee3c2 100644 (file)
@@ -334,6 +334,11 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 }
 
 #ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
 {
@@ -346,7 +351,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
        }
 
        dev->host_irq = dev->dev->irq;
-       if (request_threaded_irq(dev->host_irq, NULL,
+       if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
                                 kvm_assigned_dev_thread_msi, 0,
                                 dev->irq_name, dev)) {
                pci_disable_msi(dev->dev);
@@ -358,6 +363,11 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 #endif
 
 #ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
 {
@@ -374,7 +384,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 
        for (i = 0; i < dev->entries_nr; i++) {
                r = request_threaded_irq(dev->host_msix_entries[i].vector,
-                                        NULL, kvm_assigned_dev_thread_msix,
+                                        kvm_assigned_dev_msix,
+                                        kvm_assigned_dev_thread_msix,
                                         0, dev->irq_name, dev);
                if (r)
                        goto err;
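
The assigned-device hunks above stop passing NULL as the primary handler to request_threaded_irq() and instead install trivial kvm_assigned_dev_msi()/kvm_assigned_dev_msix() handlers that return IRQ_WAKE_THREAD. A NULL primary handler normally makes the genirq core insist on IRQF_ONESHOT; for these edge-triggered MSI vectors a dummy primary handler that simply wakes the thread avoids that constraint. The fragment below only sketches the shape of such a request (device and IRQ setup omitted); it is not the KVM code.

    #include <linux/interrupt.h>

    static irqreturn_t demo_hardirq(int irq, void *dev_id)
    {
            return IRQ_WAKE_THREAD;         /* defer all work to the thread */
    }

    static irqreturn_t demo_thread(int irq, void *dev_id)
    {
            /* sleepable work goes here */
            return IRQ_HANDLED;
    }

    static int demo_request(unsigned int irq, void *dev)
    {
            return request_threaded_irq(irq, demo_hardirq, demo_thread,
                                        0, "demo", dev);
    }
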
index f59c1e8de7a2e62b5977d90ab992e1c2240e80f5..7d7e2aaffece234a81cef3f4181cc7ffe5bb14cb 100644 (file)
@@ -198,7 +198,7 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
 }
 
 static int
-kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
@@ -212,12 +212,12 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
                return -ENOMEM;
 
        irqfd->kvm = kvm;
-       irqfd->gsi = gsi;
+       irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
 
-       file = eventfd_fget(fd);
+       file = eventfd_fget(args->fd);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto fail;
@@ -298,19 +298,19 @@ kvm_eventfd_init(struct kvm *kvm)
  * shutdown any irqfd's that match fd+gsi
  */
 static int
-kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;
 
-       eventfd = eventfd_ctx_fdget(fd);
+       eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);
 
        spin_lock_irq(&kvm->irqfds.lock);
 
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-               if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+               if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This rcu_assign_pointer is needed for when
                         * another thread calls kvm_irq_routing_update before
@@ -338,12 +338,15 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
 }
 
 int
-kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
-       if (flags & KVM_IRQFD_FLAG_DEASSIGN)
-               return kvm_irqfd_deassign(kvm, fd, gsi);
+       if (args->flags & ~KVM_IRQFD_FLAG_DEASSIGN)
+               return -EINVAL;
+
+       if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
+               return kvm_irqfd_deassign(kvm, args);
 
-       return kvm_irqfd_assign(kvm, fd, gsi);
+       return kvm_irqfd_assign(kvm, args);
 }
 
 /*
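
The kvm_irqfd() hunk above passes the whole struct kvm_irqfd down and rejects any flag other than KVM_IRQFD_FLAG_DEASSIGN with -EINVAL, the usual "reject unknown bits" pattern that lets new flags be added later without old kernels silently ignoring them. A generic sketch of that check (flag values here are illustrative, not the KVM ABI):

    #include <stdio.h>
    #include <errno.h>

    #define FLAG_DEASSIGN (1u << 0)
    #define KNOWN_FLAGS   FLAG_DEASSIGN

    static int handle(unsigned int flags)
    {
            if (flags & ~KNOWN_FLAGS)
                    return -EINVAL;         /* unknown bit set */
            if (flags & FLAG_DEASSIGN)
                    return 1;               /* deassign path */
            return 0;                       /* assign path */
    }

    int main(void)
    {
            /* prints "0 1 -22": assign, deassign, rejected unknown flag */
            printf("%d %d %d\n", handle(0), handle(FLAG_DEASSIGN), handle(4));
            return 0;
    }
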
index 7e140683ff14d503a9714058cadd9dde7e4ffaf9..44ee7124b16dae1820ca1ca1c79f2a099979da12 100644 (file)
@@ -2047,7 +2047,7 @@ static long kvm_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
-               r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
+               r = kvm_irqfd(kvm, &data);
                break;
        }
        case KVM_IOEVENTFD: {
@@ -2845,6 +2845,7 @@ void kvm_exit(void)
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
+       __free_page(fault_page);
        __free_page(hwpoison_page);
        __free_page(bad_page);
 }