Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 27 Jul 2014 16:53:01 +0000 (09:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 27 Jul 2014 16:53:01 +0000 (09:53 -0700)
Pull x86 fixes from Peter Anvin:
 "A couple of crash fixes, plus a fix that on 32 bits would cause a
  missing -ENOSYS for nonexistent system calls"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, cpu: Fix cache topology for early P4-SMT
  x86_32, entry: Store badsys error code in %eax
  x86, MCE: Robustify mcheck_init_device

146 files changed:
Documentation/input/event-codes.txt
MAINTAINERS
Makefile
arch/arm64/mm/init.c
arch/blackfin/configs/BF609-EZKIT_defconfig
arch/blackfin/kernel/vmlinux.lds.S
arch/blackfin/mach-bf533/boards/blackstamp.c
arch/blackfin/mach-bf537/boards/cm_bf537e.c
arch/blackfin/mach-bf537/boards/cm_bf537u.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/pm.h
arch/blackfin/mach-bf609/pm.c
arch/blackfin/mach-common/ints-priority.c
arch/parisc/include/uapi/asm/signal.h
arch/parisc/mm/init.c
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_interrupts.S
arch/powerpc/kvm/book3s_rmhandlers.S
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/lib/mem_64.S
arch/powerpc/lib/sstep.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/reconfig.c
arch/s390/include/asm/switch_to.h
arch/s390/kernel/head.S
arch/s390/kernel/ptrace.c
arch/s390/pci/pci.c
arch/sh/Makefile
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/sys32.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/x86/kvm/x86.c
arch/xtensa/kernel/vectors.S
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/blk-cgroup.c
block/blk-tag.c
block/compat_ioctl.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/pata_ep93xx.c
drivers/block/drbd/drbd_nl.c
drivers/block/zram/zram_drv.c
drivers/firewire/ohci.c
drivers/gpio/gpio-rcar.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/hwmon/smsc47m192.c
drivers/ide/Kconfig
drivers/ide/ide-probe.c
drivers/input/input.c
drivers/input/keyboard/st-keyscan.c
drivers/input/misc/sirfsoc-onkey.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/ti_am335x_tsc.c
drivers/isdn/i4l/isdn_ppp.c
drivers/media/dvb-frontends/si2168.c
drivers/media/dvb-frontends/si2168_priv.h
drivers/media/dvb-frontends/tda10071.c
drivers/media/dvb-frontends/tda10071_priv.h
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/tuners/si2157.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/gspca/pac7302.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/huawei_cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/wan/x25_asy.c
drivers/net/xen-netback/netback.c
drivers/parport/Kconfig
drivers/pinctrl/pinctrl-st.c
drivers/s390/char/raw3270.c
drivers/s390/crypto/ap_bus.c
drivers/staging/media/omap4iss/Kconfig
fs/coredump.c
fs/direct-io.c
fs/fuse/inode.c
fs/namei.c
fs/nfsd/nfs4xdr.c
fs/xattr.c
include/linux/libata.h
include/linux/pagemap.h
include/net/netfilter/nf_tables.h
include/net/netns/nftables.h
include/uapi/linux/fuse.h
kernel/trace/trace.c
kernel/trace/trace_clock.c
mm/hugetlb.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/rmap.c
mm/shmem.c
mm/slab_common.c
mm/truncate.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/core/dev.c
net/dns_resolver/dns_query.c
net/ipv4/af_inet.c
net/ipv4/gre_offload.c
net/ipv4/ip_options.c
net/ipv4/tcp_offload.c
net/ipv6/tcpv6_offload.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/sched/cls_u32.c
sound/firewire/bebob/bebob_maudio.c

index f1ea2c69648dcad46d061a78ac33036d3ac5c727..c587a966413e8597da3241f851db92f6e6df1d81 100644 (file)
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series, provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
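
For context, a minimal userspace sketch (not part of this series) of how a
client could test the new property bit through the evdev EVIOCGPROP ioctl.
The device path /dev/input/event0 is a placeholder, and the fallback #define
is only an assumption for builds against older headers:

	/* Query the evdev property bitmap and test INPUT_PROP_TOPBUTTONPAD. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>

	#ifndef INPUT_PROP_TOPBUTTONPAD
	#define INPUT_PROP_TOPBUTTONPAD 0x04	/* fallback for older uapi headers */
	#endif

	int main(void)
	{
		unsigned char props[INPUT_PROP_MAX / 8 + 1];
		int fd = open("/dev/input/event0", O_RDONLY);

		if (fd < 0)
			return 1;
		memset(props, 0, sizeof(props));
		if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) >= 0 &&
		    (props[INPUT_PROP_TOPBUTTONPAD / 8] &
		     (1 << (INPUT_PROP_TOPBUTTONPAD % 8))))
			printf("top software button area present\n");
		close(fd);
		return 0;
	}
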
index 61a8f486306b700749dce48b58ff63c856b3a5eb..86efa7e213c257e59f4285d30bad7555b259c566 100644 (file)
@@ -6956,6 +6956,12 @@ L:       linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/pinctrl/pinctrl-at91.c
 
+PIN CONTROLLER - RENESAS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     linux-sh@vger.kernel.org
+S:     Maintained
+F:     drivers/pinctrl/sh-pfc/
+
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <t.figa@samsung.com>
 M:     Thomas Abraham <thomas.abraham@linaro.org>
@@ -8019,6 +8025,16 @@ F:       drivers/ata/
 F:     include/linux/ata.h
 F:     include/linux/libata.h
 
+SERIAL ATA AHCI PLATFORM devices support
+M:     Hans de Goede <hdegoede@redhat.com>
+M:     Tejun Heo <tj@kernel.org>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Supported
+F:     drivers/ata/ahci_platform.c
+F:     drivers/ata/libahci_platform.c
+F:     include/linux/ahci_platform.h
+
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:     Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
 L:     linux-scsi@vger.kernel.org
index 6b2774145d664289cca4b1a1a3799543d5f916c2..5147f3f23504987e57b58b3afbe144d89f4504d0 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -688,6 +688,8 @@ KBUILD_CFLAGS       += -fomit-frame-pointer
 endif
 endif
 
+KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS  += -g
 KBUILD_AFLAGS  += -Wa,-gdwarf-2
index f43db8a6926208f9b9419c3114d1c41c13514566..e90c5426fe14e5a1212706e3802aa32325c77d95 100644 (file)
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+       phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+       return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
        struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-               unsigned long max_dma_phys =
-                       (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
-               max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+               max_dma = PFN_DOWN(max_zone_dma_phys());
                zone_size[ZONE_DMA] = max_dma - min;
        }
        zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
-               dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+               dma_phys_limit = max_zone_dma_phys();
        dma_contiguous_reserve(dma_phys_limit);
 
        memblock_allow_resize();
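
As a worked illustration of max_zone_dma_phys() above, with made-up addresses
that are only assumptions for this sketch: the ZONE_DMA limit becomes the DRAM
start rounded down to a 4 GiB boundary, plus 4 GiB, clamped to the end of DRAM.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dram_start = 0x8000000000ULL;		/* assumed DRAM base */
		uint64_t dram_end   = 0x80c0000000ULL;		/* assumed 3 GiB bank */
		uint64_t offset = dram_start & ~0xffffffffULL;	/* GENMASK_ULL(63, 32) */
		uint64_t limit  = offset + (1ULL << 32);

		if (limit > dram_end)
			limit = dram_end;
		/* Prints 0x80c0000000: the whole bank stays reachable by 32-bit
		 * devices once they apply their DMA offset. */
		printf("ZONE_DMA limit: %#llx\n", (unsigned long long)limit);
		return 0;
	}
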
index a7e9bfd84183d5b7c26924679f9bb33d88ac0118..fcec5ce71392d20712b15516c3786e43513b0d10 100644 (file)
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index ba35864b2b74b50e3ad32527419a83ef6abb9629..c9eec84aa258628d1c9d70fc545420dde30c8bc1 100644 (file)
@@ -145,7 +145,7 @@ SECTIONS
 
        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-       .init.data : AT(__data_lma + __data_len)
+       .init.data : AT(__data_lma + __data_len + 32)
        {
                __sinitdata = .;
                INIT_DATA
index 63b0e4fe760cd558d20a2f8359c1c975180b9c11..0ccf0cf4daaf92f8d38aa18962defd8a10707e7d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index c65c6dbda3da9af86231d299b4502f0d3d72edb9..1e7290ef35258da56c5521ed23a1b637286b7342 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index af58454b4bff3fdc4bffda53d5d252726fd4e4f8..c7495dc74690db99f52bfa30c94c62dd5faae63c 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index a0211225748d23f0d0f14cdba4fd99a34aa8208f..6b988ad653d8d1e69a1721ddfe4b7208bf7cebb9 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 90138e6112c14b2f186640f22e61368a517fab94..1fe7ff286619f693c113faeac592ce3167d8de72 100644 (file)
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
        PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "4bit",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "8bit",  "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
index 430b16d5ccb1124f6fd896d4c8651050695d9053..6ab951534d790b6060aeea63cda8c1b158d90470 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
index 9f777df4cacce9e77ada5ec46ff1698a72fbe419..e862f7823e68db1b5ea374a295982c23db2800c8 100644 (file)
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 88dee43e7abe8c4e34da9016b2333c539ef6b9a2..2de71e8c104b1e2233c7eba47c42ad28d2ab60c9 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index 1ba4600de69f72b37ca503f7aab752b9f4dcdb97..e2c0b024ce88f2593f4551b711b3821d8ae3879e 100644 (file)
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL     0x00000010
 
-       if (!devm_pinctrl_get_select_default(&pdev->dev))
-               return -EBUSY;
        bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
        bfin_write32(SMC_B0CTL, 0x01002011);
        bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-       devm_pinctrl_put(pdev->dev.pins->p);
        bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "8bit",  "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "16bit",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit",  "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
index 3ca0fb965636ed3fad126ae021d9d87a6518ea8a..a1efd936dd30d2ec18e08c9d5b76bae6afabe99b 100644 (file)
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
index 0cdd6955c7be5a80a2cb7d406b3fefb1eac9a9a9..b1bfcf434d16cfc25e999222c5c972f3f110d519 100644 (file)
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-       bf609_nor_flash_exit();
+       bf609_nor_flash_exit(NULL);
        return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-       bf609_nor_flash_init();
+       bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
index 867b7cef204cb91f0ed15e9386ba7ea0b2089d96..1f94784eab6d79b7f63673e20e2039fb41a07edb 100644 (file)
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
 
        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-       bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
        /* Enable interrupts IVG7-15 */
        bfin_irq_flags |= IMASK_IVG15 |
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
index a2fa297196bc19f1de4f021ebec6e5338b237313..f5645d6a89f2c9c79e8e22cd9a7201df6dc389df 100644 (file)
@@ -69,8 +69,6 @@
 #define SA_NOMASK      SA_NODEFER
 #define SA_ONESHOT     SA_RESETHAND
 
-#define SA_RESTORER    0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
 
index ae085ad0fba03827df21874edfb6a6985c3338f8..0bef864264c0bb1e0d445c184ef79e51c9153caf 100644 (file)
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-       memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
index bc2347774f0ad4ed111a5c98546fb763de6d8bbd..0fdd7eece6d91a3183a5857967ad910998d33c09 100644 (file)
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
            CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL  (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
index fddb72b48ce9bd2a9a4e25460c2ff5cc4bc15ad5..d645428a65a411c187768bd296cc725e09fc2da6 100644 (file)
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
        return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+                                            bool is_base_size)
 {
+
        int size, a_psize;
        /* Look at the 8 bit LP value */
        unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
                                continue;
 
                        a_psize = __hpte_actual_psize(lp, size);
-                       if (a_psize != -1)
+                       if (a_psize != -1) {
+                               if (is_base_size)
+                                       return 1ul << mmu_psize_defs[size].shift;
                                return 1ul << mmu_psize_defs[a_psize].shift;
+                       }
                }
 
        }
        return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
index 807014dde821058429b41a5d825759d50d4d9ed7..c2b4dcf23d03768427fb5a42a134b86d02e8b0e8 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
  */
 struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
-       unsigned int **protptrs[2];
+       unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
        unsigned int *low_prot[4];
 };
 
index 9ea266eae33e235cde422eaaf8c86e9fc9275236..7e4612528546b710cb13d7b9346d394d5cd4d99e 100644 (file)
@@ -277,6 +277,8 @@ n:
        .globl n;       \
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)     \
        .section ".kprobes.text","a";   \
        .globl  n;      \
index 965291b4c2fa15a9f6f50cd8ca8b1e3a3067db02..0c157642c2a140a5be7cf26d677f5fae2fe05816 100644 (file)
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
+       {       /* Power8 DD1: Does not support doorbell IPIs */
+               .pvr_mask               = 0xffffff00,
+               .pvr_value              = 0x004d0100,
+               .cpu_name               = "POWER8 (raw)",
+               .cpu_features           = CPU_FTRS_POWER8_DD1,
+               .cpu_user_features      = COMMON_USER_POWER8,
+               .cpu_user_features2     = COMMON_USER2_POWER8,
+               .mmu_features           = MMU_FTRS_POWER8,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .pmc_type               = PPC_PMC_IBM,
+               .oprofile_cpu_type      = "ppc64/power8",
+               .oprofile_type          = PPC_OPROFILE_INVALID,
+               .cpu_setup              = __setup_cpu_power8,
+               .cpu_restore            = __restore_cpu_power8,
+               .flush_tlb              = __flush_tlb_power8,
+               .machine_check_early    = __machine_check_early_realmode_p8,
+               .platform               = "power8",
+       },
        {       /* Power8 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x004d0000,
index 80561074078d01ca8d101f7997a55699e8589e3f..68468d695f12ab864281f19a3cfd872a6976fc8d 100644 (file)
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!rma_setup && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_page_size(v, r);
+                               unsigned long psize = hpte_base_page_size(v, r);
                                unsigned long senc = slb_pgsize_encoding(psize);
                                unsigned long lpcr;
 
index 6e6224318c36aaf166e33e7c57c8a8e0dcd9544e..5a24d3c2b6b8ce9bb39f59f4c69733b00ca068f6 100644 (file)
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                        r = hpte[i+1];
 
                        /*
-                        * Check the HPTE again, including large page size
-                        * Since we don't currently allow any MPSS (mixed
-                        * page-size segment) page sizes, it is sufficient
-                        * to check against the actual page size.
+                        * Check the HPTE again, including base page size
                         */
                        if ((v & valid) && (v & mask) == val &&
-                           hpte_page_size(v, r) == (1ul << pshift))
+                           hpte_base_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);
 
index 868347ef09fd48bcf8bfd343becb49f6898887c5..558a67df8126434eb427c7ca24eebabb82676cac 100644 (file)
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -112(r1)
index e2c29e381dc7096d13af053526e0d98cf951d08e..d044b8b7c69dd6b675de969d30cc0345d104ba96 100644 (file)
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)    addi   reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
index 9eec675220e621e029eac8ecf2a5dabfff17df5a..16c4d88ba27df9ed3caba2e37e20d02a40407dbc 100644 (file)
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
        mfmsr   r5
        LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
        toreal(r7)
index edb14ba992b34fa74f3e7cf11c1b9a8a1b01e921..ef27fbd5d9c54694beac4d1b400dc7d512db38da 100644 (file)
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 3 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
-       server = args->args[1];
-       priority = args->args[2];
+       irq = be32_to_cpu(args->args[0]);
+       server = be32_to_cpu(args->args[1]);
+       priority = be32_to_cpu(args->args[2]);
 
        rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 3) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        server = priority = 0;
        rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
                goto out;
        }
 
-       args->rets[1] = server;
-       args->rets[2] = priority;
+       args->rets[1] = cpu_to_be32(server);
+       args->rets[2] = cpu_to_be32(priority);
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_off(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_on(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
        return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       args->token = be32_to_cpu(args->token);
-       args->nargs = be32_to_cpu(args->nargs);
-       args->nret = be32_to_cpu(args->nret);
-       for (i = 0; i < args->nargs; i++)
-               args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       for (i = 0; i < args->nret; i++)
-               args->args[i] = cpu_to_be32(args->args[i]);
-       args->token = cpu_to_be32(args->token);
-       args->nargs = cpu_to_be32(args->nargs);
-       args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
        struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
        if (rc)
                goto fail;
 
-       kvmppc_rtas_swap_endian_in(&args);
-
        /*
         * args->rets is a pointer into args->args. Now that we've
         * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         * value so we can restore it on the way out.
         */
        orig_rets = args.rets;
-       args.rets = &args.args[args.nargs];
+       args.rets = &args.args[be32_to_cpu(args.nargs)];
 
        mutex_lock(&vcpu->kvm->lock);
 
        rc = -ENOENT;
        list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-               if (d->token == args.token) {
+               if (d->token == be32_to_cpu(args.token)) {
                        d->handler->handler(vcpu, &args);
                        rc = 0;
                        break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
        if (rc == 0) {
                args.rets = orig_rets;
-               kvmppc_rtas_swap_endian_out(&args);
                rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
                if (rc)
                        goto fail;
index dd2cc03f406f9a0e1f844473ca92c5560eca6c67..86903d3f5a033d215b3857f979fcea2cb68a3de6 100644 (file)
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                if (printk_ratelimit())
                        pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
                                __func__, (long)gfn, pfn);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
index 0738f96befbff76829e1119a5feefc915be39da8..43435c6892fb05d4ac2a732d30eef6e58b4bfaf8 100644 (file)
@@ -77,7 +77,7 @@ _GLOBAL(memset)
        stb     r4,0(r6)
        blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
        cmplw   0,r3,r4
        bgt     backwards_memcpy
        b       memcpy
index 412dd46dd0b7ea7136af14e8559e5d9b78d2c08a..5c09f365c84276161b75fd524fe270b2d43a1db2 100644 (file)
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x3f;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
-                       if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef __powerpc64__
                case 27:        /* sld */
-                       sh = regs->gpr[rd] & 0x7f;
+                       sh = regs->gpr[rb] & 0x7f;
                        if (sh < 64)
                                regs->gpr[ra] = regs->gpr[rd] << sh;
                        else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x7f;
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
-                       if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb | ((instr & 2) << 4);
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
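
The sstep.c hunks above widen the carry-mask constant to unsigned long: with a
plain 32-bit 1, the shift is undefined once the shift count reaches 32 and the
emulated XER.CA can end up wrong. A standalone sketch of the corrected
computation (the helper name and test values are made up; 1ull is used here to
keep the sketch portable, where the kernel code can rely on a 64-bit unsigned
long):

	#include <stdbool.h>

	/* CA for sraw/srad-style shifts: set when a negative value is shifted
	 * right and any 1 bits are shifted out. */
	static bool sra_sets_ca(long long ival, unsigned int sh)
	{
		if (ival >= 0)
			return false;	/* carry only matters for negative values */
		if (sh >= 64)
			return true;	/* every bit, including the sign, shifts out */
		return (ival & ((1ull << sh) - 1)) != 0;
	}

	int main(void)
	{
		/* -1 >> 40 sheds set bits (CA set); -8 >> 3 sheds none (CA clear). */
		return sra_sets_ca(-1, 40) && !sra_sets_ca(-8, 3) ? 0 : 1;
	}
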
index 022b38e6a80be83c62900419c027562600b0e02a..2d0b4d68a40a076f970fd458ab09d06785a43ff2 100644 (file)
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
        }
 
        of_node_set_flag(dn, OF_DYNAMIC);
+       of_node_init(dn);
 
        return dn;
 }
index 0435bb65d0aaf616d9cf4257b15e242d18d5d58c..1c0a60d988678ebf1f80f5ddc30beaa3f1f40094 100644 (file)
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
        np->properties = proplist;
        of_node_set_flag(np, OF_DYNAMIC);
+       of_node_init(np);
 
        np->parent = derive_parent(path);
        if (IS_ERR(np->parent)) {
index df38c70cd59ef295328068e432fa55b04e03651f..18ea9e3f8142954926b8fea3afe79ce90193a4b3 100644 (file)
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
                return 0;
 
        asm volatile(
-               "0:     lfpc    %1\n"
-               "       la      %0,0\n"
+               "       lfpc    %1\n"
+               "0:     la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
index 7ba7d6784510c257bbadc72d8f9cdf58151b8e5b..e88d35d749501e43b296e8794351f691368dcf5e 100644 (file)
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-       .long 3, 0xc100efea, 0xf46ce800, 0x00400000
+       .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
-       .long 2, 0xc100efea, 0xf46c0000
+       .long 2, 0xc100eff2, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
-       .long 2, 0xc100efea, 0xf0680000
+       .long 2, 0xc100eff2, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
        .long 1, 0xc100efc2
 #elif defined(CONFIG_MARCH_Z990)
index 2d716734b5b1b2b482e580aee483d07b014c301f..5dc7ad9e2fbf2d1b035194b30db18a82957c1358 100644 (file)
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                        unsigned long mask = PSW_MASK_USER;
 
                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
-                       if ((data & ~mask) != PSW_USER_BITS)
+                       if ((data ^ PSW_USER_BITS) & ~mask)
+                               /* Invalid psw mask. */
+                               return -EINVAL;
+                       if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+                               /* Invalid address-space-control bits */
                                return -EINVAL;
                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+                               /* Invalid addressing mode bits */
                                return -EINVAL;
                }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
 
                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
-                       if ((tmp & ~mask) != PSW32_USER_BITS)
+                       if ((tmp ^ PSW32_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
+                       if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+                               /* Invalid address-space-control bits */
+                               return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
                                (__u64)(tmp & mask) << 32;
index 9ddc51eeb8d690410aa6fbf98e1a36a6d544e4db..30de42730b2f20096890cf4022023a4f1b292884 100644 (file)
 static LIST_HEAD(zpci_list);
 static DEFINE_SPINLOCK(zpci_list_lock);
 
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
 static struct irq_chip zpci_irq_chip = {
        .name = "zPCI",
-       .irq_unmask = zpci_enable_irq,
-       .irq_mask = zpci_disable_irq,
+       .irq_unmask = unmask_msi_irq,
+       .irq_mask = mask_msi_irq,
 };
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
        return rc;
 }
 
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
-       int offset, pos;
-       u32 mask_bits;
-
-       if (msi->msi_attrib.is_msix) {
-               offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-                       PCI_MSIX_ENTRY_VECTOR_CTRL;
-               msi->masked = readl(msi->mask_base + offset);
-               writel(flag, msi->mask_base + offset);
-       } else if (msi->msi_attrib.maskbit) {
-               pos = (long) msi->mask_base;
-               pci_read_config_dword(msi->dev, pos, &mask_bits);
-               mask_bits &= ~(mask);
-               mask_bits |= flag & mask;
-               pci_write_config_dword(msi->dev, pos, mask_bits);
-       } else
-               return 0;
-
-       msi->msi_attrib.maskbit = !!flag;
-       return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 
        /* Release MSI interrupts */
        list_for_each_entry(msi, &pdev->msi_list, list) {
-               zpci_msi_set_mask_bits(msi, 1, 1);
+               if (msi->msi_attrib.is_msix)
+                       default_msix_mask_irq(msi, 1);
+               else
+                       default_msi_mask_irq(msi, 1, 1);
                irq_set_msi_desc(msi->irq, NULL);
                irq_free_desc(msi->irq);
                msi->msg.address_lo = 0;
index d4d16e4be07c246d5941c799e9f4cff5d091d702..bf5b3f5f496239740d0fc451e6c7806d2c2a59fc 100644 (file)
@@ -32,7 +32,8 @@ endif
 
 cflags-$(CONFIG_CPU_SH2)               := $(call cc-option,-m2,)
 cflags-$(CONFIG_CPU_SH2A)              += $(call cc-option,-m2a,) \
-                                          $(call cc-option,-m2a-nofpu,)
+                                          $(call cc-option,-m2a-nofpu,) \
+                                          $(call cc-option,-m4-nofpu,)
 cflags-$(CONFIG_CPU_SH3)               := $(call cc-option,-m3,)
 cflags-$(CONFIG_CPU_SH4)               := $(call cc-option,-m4,) \
        $(call cc-option,-mno-implicit-fp,-m4-nofpu)
index b73274fb961a27a9b54eb5fd1719650762fef305..42f2bca1d338c231c63c6f976b647336459b1435 100644 (file)
 #define __NR_finit_module      342
 #define __NR_sched_setattr     343
 #define __NR_sched_getattr     344
+#define __NR_renameat2         345
 
-#define NR_syscalls            345
+#define NR_syscalls            346
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index d066eb18650c1598f898f7a4314bdd7ed5b606a7..f834224208ed8ca73d0b12f9d5ad320efd293ca8 100644 (file)
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
 
        .globl          sys32_mmap2
 sys32_mmap2:
index 151ace8766cc2d99d5be3552a4b2014eb74fdaa6..85fe9b1087cdb3eae326a46337f5484ca6d9a636 100644 (file)
@@ -86,3 +86,4 @@ sys_call_table:
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/        .long sys_renameat2
index 4bd4e2bb26cf4d35a8980f1a9040cdd88a645a4a..33ecba2826ea20b65693d90649a791db91fd4fcd 100644 (file)
@@ -87,6 +87,7 @@ sys_call_table32:
 /*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys32_renameat2
 
 #endif /* CONFIG_COMPAT */
 
@@ -165,3 +166,4 @@ sys_call_table:
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys_renameat2
index f6449334ec4514741f7f79481e419d30442d1ac2..ef432f891d30a69468a8093c2bd6f33ca1a7d54f 100644 (file)
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
                        kvm_x86_ops->set_nmi(vcpu);
                }
        } else if (kvm_cpu_has_injectable_intr(vcpu)) {
+               /*
+                * Because interrupts can be injected asynchronously, we are
+                * calling check_nested_events again here to avoid a race condition.
+                * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+                * proposal and current concerns.  Perhaps we should be setting
+                * KVM_REQ_EVENT only on certain events and not unconditionally?
+                */
+               if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+                       r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+                       if (r != 0)
+                               return r;
+               }
                if (kvm_x86_ops->interrupt_allowed(vcpu)) {
                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
                                            false);
index f9e1ec346e359c7410482c5d8dfd5a9de88bfafc..8453e6e398951b0d1864b6e570c8d7d63e45d356 100644 (file)
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
        beqz    a2, 1f          # if at start of vector, don't restore
 
        addi    a0, a0, -128
-       bbsi    a0, 8, 1f       # don't restore except for overflow 8 and 12
-       bbsi    a0, 7, 2f
+       bbsi.l  a0, 8, 1f       # don't restore except for overflow 8 and 12
+
+       /*
+        * This fixup handler is for the extremely unlikely case where the
+        * overflow handler's reference thru a0 gets a hardware TLB refill
+        * that bumps out the (distinct, aliasing) TLB entry that mapped its
+        * prior references thru a9/a13, and where our reference now thru
+        * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+        */
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       bbsi.l  a0, 7, 2f
 
        /*
         * Restore a0 as saved by _WindowOverflow8().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a9, and where our reference now thru a9
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a9, -16
-       wsr     a2, depc        # replace the saved a0
-       j       1f
+       l32e    a0, a9, -16
+       wsr     a0, depc        # replace the saved a0
+       j       3f
 
 2:
        /*
         * Restore a0 as saved by _WindowOverflow12().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a13, and where our reference now thru a13
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a13, -16
-       wsr     a2, depc        # replace the saved a0
+       l32e    a0, a13, -16
+       wsr     a0, depc        # replace the saved a0
+3:
+       xsr     a3, excsave1
+       movi    a0, 0
+       s32i    a0, a3, EXC_TABLE_FIXUP
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
 1:
        /*
         * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
 
        s32i    a0, a2, PT_DEPC
 
+_DoubleExceptionVector_handle_exception:
        addx4   a0, a0, a3
        l32i    a0, a0, EXC_TABLE_FAST_USER
        xsr     a3, excsave1
@@ -464,10 +469,119 @@ _DoubleExceptionVector_WindowOverflow:
        rotw    -3
        j       1b
 
-       .end literal_prefix
 
 ENDPROC(_DoubleExceptionVector)
 
+/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ *   (we'll need to rotate window back and there's no place to save this
+ *    information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ *   about the conditions of the original double exception that happened in
+ *   the window overflow handler is lost, so we just return to userspace to
+ *   retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+       rsr     a0, ps
+       extui   a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+       rsr     a2, windowbase
+       sub     a0, a2, a0
+       extui   a0, a0, 0, 3
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       _beqi   a0, 1, .Lhandle_1
+       _beqi   a0, 3, .Lhandle_3
+
+       .macro  overflow_fixup_handle_exception_pane n
+
+       rsr     a0, depc
+       rotw    -\n
+
+       xsr     a3, excsave1
+       wsr     a2, depc
+       l32i    a2, a3, EXC_TABLE_KSTK
+       s32i    a0, a2, PT_AREG0
+
+       movi    a0, .Lrestore_\n
+       s32i    a0, a2, PT_DEPC
+       rsr     a0, exccause
+       j       _DoubleExceptionVector_handle_exception
+
+       .endm
+
+       overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+       overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+       overflow_fixup_handle_exception_pane 3
+
+       .macro  overflow_fixup_restore_a0_pane n
+
+       rotw    \n
+       /* Need to preserve a0 value here to be able to handle exception
+        * that may occur on a0 reload from stack. It may occur because
+        * TLB miss handler may not be atomic and pointer to page table
+        * may be lost before we get here. There are no free registers,
+        * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+        */
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       bbsi.l  a0, 7, 1f
+       l32e    a0, a9, -16
+       j       2f
+1:
+       l32e    a0, a13, -16
+2:
+       rotw    -\n
+
+       .endm
+
+.Lrestore_2:
+       overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, 0
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       rfe
+
+.Lrestore_1:
+       overflow_fixup_restore_a0_pane 1
+       j       .Lset_default_fixup
+.Lrestore_3:
+       overflow_fixup_restore_a0_pane 3
+       j       .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+       .end literal_prefix
 /*
  * Debug interrupt vector
  *
index ee32c0085dff4296fb9290e3706733db9402460d..d16db6df86f8e3d823ac2f189816d9889249323e 100644 (file)
@@ -269,13 +269,13 @@ SECTIONS
                  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
                  .DoubleExceptionVector.literal,
-                 DOUBLEEXC_VECTOR_VADDR - 16,
+                 DOUBLEEXC_VECTOR_VADDR - 40,
                  SIZEOF(.UserExceptionVector.text),
                  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
                  .DoubleExceptionVector.text,
                  DOUBLEEXC_VECTOR_VADDR,
-                 32,
+                 40,
                  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
index 4224256bb215f17c52d91662f186ecb250dee361..77ed20209ca57232dd79afa60fe91210515746bf 100644 (file)
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
                return -EINVAL;
        }
 
-       if (it && start - it->start < bank_sz) {
+       if (it && start - it->start <= bank_sz) {
                if (start == it->start) {
                        if (end - it->start < bank_sz) {
                                it->start = end;
index b9f4cc494ecefbf2560483bdc4685bb98d59b5d3..28d227c5ca7781aed5bb15cf37d3b0f1dcaa8d79 100644 (file)
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
 
+       /*
+        * @q could be exiting and already have destroyed all blkgs as
+        * indicated by NULL root_blkg.  If so, don't confuse policies.
+        */
+       if (!q->root_blkg)
+               return;
+
        blk_throtl_drain(q);
 }
 
index 3f33d86722688a4f50ac0064488540d1960447fb..a185b86741e5ff80dccbc5223eb570f764199d86 100644 (file)
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:       the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and free it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-       int retval;
-
-       retval = atomic_dec_and_test(&bqt->refcnt);
-       if (retval) {
+       if (atomic_dec_and_test(&bqt->refcnt)) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
                kfree(bqt);
        }
-
-       return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
        if (!bqt)
                return;
 
-       __blk_free_tags(bqt);
+       blk_free_tags(bqt);
 
        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:       the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-       if (unlikely(!__blk_free_tags(bqt)))
-               BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
index fbd5a67cb773886104cac49698ee589b8fbc9933..a0926a6094b28a7e4e67b3a88afc993719294405 100644 (file)
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKROSET:
        case BLKDISCARD:
        case BLKSECDISCARD:
+       case BLKZEROOUT:
        /*
         * the ones below are implemented in blkdev_locked_ioctl,
         * but we call blkdev_ioctl, which gets the lock for us
index dae5607e11153a7de89dc5cbec9ba85fda080ba9..4cd52a4541a96c44fe367bb274484f282287ab20 100644 (file)
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
+       { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
        /* Asmedia */
        { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
index 18d97d5c7d90f216d76abd9033346a734ae8d9ed..677c0c1b03bd658322cad2faf5becd86ce1db3cd 100644 (file)
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  *     ata_qc_new - Request an available ATA command, for queueing
  *     @ap: target port
  *
+ *     Some ATA host controllers may implement a queue depth which is less
+ *     than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
+ *     the hardware limitation.
+ *
  *     LOCKING:
  *     None.
  */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc = NULL;
+       unsigned int max_queue = ap->host->n_tags;
        unsigned int i, tag;
 
        /* no command while frozen */
        if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
                return NULL;
 
-       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-               tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+       for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+               tag = tag < max_queue ? tag : 0;
 
                /* the last tag is reserved for internal command. */
                if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 {
        spin_lock_init(&host->lock);
        mutex_init(&host->eh_mutex);
+       host->n_tags = ATA_MAX_QUEUE - 1;
        host->dev = dev;
        host->ops = ops;
 }
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
        int i, rc;
 
+       host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
        /* host must have been started */
        if (!(host->flags & ATA_HOST_STARTED)) {
                dev_err(host->dev, "BUG: trying to register unstarted host\n");
index 6760fc4e85b8c809e39655fd92adc83871e75fce..dad83df555c49d94534a544483e20aa0a9767635 100644 (file)
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
-               if (err & ATA_UNC)
+               if (err & (ATA_UNC | ATA_AMNF))
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
                }
 
                if (cmd->command != ATA_CMD_PACKET &&
-                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
-                                    ATA_ABORTED)))
-                       ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+                                    ATA_IDNF | ATA_ABORTED)))
+                       ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
                          res->feature & ATA_ICRC ? "ICRC " : "",
                          res->feature & ATA_UNC ? "UNC " : "",
+                         res->feature & ATA_AMNF ? "AMNF " : "",
                          res->feature & ATA_IDNF ? "IDNF " : "",
                          res->feature & ATA_ABORTED ? "ABRT " : "");
 #endif
index 6ad5c072ce348913d8c511574db503a37e798c9c..4d37c5415fc7fec23ba0120a917d706362709770 100644 (file)
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
        struct ep93xx_pata_data *drv_data;
        struct ata_host *host;
        struct ata_port *ap;
-       unsigned int irq;
+       int irq;
        struct resource *mem_res;
        void __iomem *ide_base;
        int err;
index 1b35c45c92b75d7f01fd3fcda7a861ae099b4c7e..3f2e1673808053a4de70077735a67723f9955b64 100644 (file)
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
        struct task_struct *opa;
 
        kref_get(&connection->kref);
+       /* We may just have force_sig()'ed this thread
+        * to get it out of some blocking network function.
+        * Clear signals; otherwise kthread_run(), which internally uses
+        * wait_on_completion_killable(), will mistake our pending signal
+        * for a new fatal signal and fail. */
+       flush_signals(current);
        opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
        if (IS_ERR(opa)) {
                drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
index 089e72cd37bea051bf9f9758644fd8b79ea038cc..36e54be402df30b68c579e09c588193f5f015e00 100644 (file)
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
        memset(&zram->stats, 0, sizeof(zram->stats));
 
        zram->disksize = 0;
-       if (reset_capacity) {
+       if (reset_capacity)
                set_capacity(zram->disk, 0);
-               revalidate_disk(zram->disk);
-       }
+
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate disk out of the init_lock to avoid lockdep splat.
+        * It's okay because disk's capacity is protected by init_lock
+        * so that revalidate_disk always sees up-to-date capacity.
+        */
+       if (reset_capacity)
+               revalidate_disk(zram->disk);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-       revalidate_disk(zram->disk);
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate disk out of the init_lock to avoid lockdep splat.
+        * It's okay because disk's capacity is protected by init_lock
+        * so that revalidate_disk always sees up-to-date capacity.
+        */
+       revalidate_disk(zram->disk);
+
        return len;
 
 out_destroy_comp:
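
Both zram hunks above apply the same rule: update the capacity while holding init_lock, then call revalidate_disk() only after dropping it, since revalidation may take other locks and otherwise triggers a lockdep report. A rough userspace analogue of that ordering, assuming a pthread rwlock in place of init_lock:

#include <pthread.h>

struct fake_disk { long capacity; };

static pthread_rwlock_t init_lock = PTHREAD_RWLOCK_INITIALIZER;

void set_capacity(struct fake_disk *d, long sectors) { d->capacity = sectors; }
void revalidate(struct fake_disk *d) { (void)d; /* may take other locks */ }

void reset_device(struct fake_disk *d, int reset_capacity)
{
        pthread_rwlock_wrlock(&init_lock);
        if (reset_capacity)
                set_capacity(d, 0);
        pthread_rwlock_unlock(&init_lock);

        /* capacity is already consistent, so revalidation is safe to run
         * outside init_lock and cannot invert the lock order */
        if (reset_capacity)
                revalidate(d);
}
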
index 57985410f12f7ea9770818457e2d9550d815a3dd..a66a3217f1d92f939fa4f6f2b1ee7a7ee976488f 100644 (file)
@@ -336,10 +336,10 @@ static const struct {
                QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
-               QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+               QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
-               0},
+               QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
index 0c9f803fc1acdb3fe9e6798ce506e1ec21a41680..b6ae89ea88119b2b1d35b522f64e07470d20a8dc 100644 (file)
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
 
 static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
        .map    = gpio_rcar_irq_domain_map,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 struct gpio_rcar_info {
index f36126383d260166a950ad20c72b69637246c550..d893e4da5dcef9cc0c30d873f6f3d6033a27a03d 100644 (file)
@@ -1616,22 +1616,6 @@ out:
        return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-       struct i915_vma *vma;
-
-       /*
-        * Only the global gtt is relevant for gtt memory mappings, so restrict
-        * list traversal to objects bound into the global address space. Note
-        * that the active list should be empty, but better safe than sorry.
-        */
-       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
index 3521f998a1788488b8c396860f6433ffacb81ae2..34894b57306401645a2184d48d99ed2966c8c9f1 100644 (file)
@@ -31,7 +31,7 @@
 struct i915_render_state {
        struct drm_i915_gem_object *obj;
        unsigned long ggtt_offset;
-       void *batch;
+       u32 *batch;
        u32 size;
        u32 len;
 };
@@ -80,7 +80,7 @@ free:
 
 static void render_state_free(struct i915_render_state *so)
 {
-       kunmap(so->batch);
+       kunmap(kmap_to_page(so->batch));
        i915_gem_object_ggtt_unpin(so->obj);
        drm_gem_object_unreference(&so->obj->base);
        kfree(so);
index 267f069765adedffb942d1352649502ac624a364..c05c84f3f091382729b5cfcfa867471faed06a83 100644 (file)
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
-       u32 seqno, ctl;
+       u32 seqno;
 
        ring->hangcheck.deadlock++;
 
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;
 
-       /* cursory check for an unkickable deadlock */
-       ctl = I915_READ_CTL(signaller);
-       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
-               return -1;
-
        if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
                return 1;
 
-       if (signaller->hangcheck.deadlock)
+       /* cursory check for an unkickable deadlock */
+       if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+           semaphore_passed(signaller) < 0)
                return -1;
 
        return 0;
index 0b2471107137a90af6677ece89a7351c9b97f599..c0ea66192fe03a8fe977ac0aea977a07204530c4 100644 (file)
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 250bac3935a43954c4fff467cdd7da694e1d57c3..15e4f28015e1e74fcc0a3e6189feee1274fa1794 100644 (file)
@@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index c66952d4b00cc5d706a13650fe2d82ef71389f1d..3c69f58e46efd94b02a440e15e8aad66f9241712 100644 (file)
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index b7204500a9a6970f863c09fe82aa495975b4092a..60c47f8291222369f7f6070953eea7bda1a25ce9 100644 (file)
@@ -449,6 +449,7 @@ struct radeon_bo_va {
 
        /* protected by vm mutex */
        struct list_head                vm_list;
+       struct list_head                vm_status;
 
        /* constant after initialization */
        struct radeon_vm                *vm;
@@ -867,6 +868,9 @@ struct radeon_vm {
        struct list_head                va;
        unsigned                        id;
 
+       /* BOs freed, but not yet updated in the PT */
+       struct list_head                freed;
+
        /* contains the page directory */
        struct radeon_bo                *page_directory;
        uint64_t                        pd_gpu_addr;
@@ -875,6 +879,8 @@ struct radeon_vm {
        /* array of page tables, one for each page directory entry */
        struct radeon_vm_pt             *page_tables;
 
+       struct radeon_bo_va             *ib_bo_va;
+
        struct mutex                    mutex;
        /* last fence for cs using this vm */
        struct radeon_fence             *fence;
@@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo);
@@ -2847,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t offset,
                          uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
index 71a143461478a60f5d9bc8428a0276c38b78953b..ae763f60c8a0a23f551bffb5119770d94527931a 100644 (file)
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
 {
        struct radeon_device *rdev = p->rdev;
+       struct radeon_bo_va *bo_va;
        int i, r;
 
        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;
 
-       r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+       r = radeon_vm_clear_freed(rdev, vm);
+       if (r)
+               return r;
+
+       if (vm->ib_bo_va == NULL) {
+               DRM_ERROR("Tmp BO not in VM!\n");
+               return -EINVAL;
+       }
+
+       r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                        continue;
 
                bo = p->relocs[i].robj;
-               r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+               bo_va = radeon_vm_bo_find(vm, bo);
+               if (bo_va == NULL) {
+                       dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+                       return -EINVAL;
+               }
+
+               r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }
index 03686fab842d3c2e0de237fa6fb027895a025a80..697add2cd4e34e89b6276659142f476ccf0c1e64 100644 (file)
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
        if (!radeon_check_pot_argument(radeon_vm_size)) {
                dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
-       if (radeon_vm_size < 4) {
-               dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+       if (radeon_vm_size < 1) {
+               dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /*
         * Max GPUVM size for Cayman, SI and CI are 40 bits.
         */
-       if (radeon_vm_size > 1024*1024) {
-               dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+       if (radeon_vm_size > 1024) {
+               dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (radeon_vm_block_size < 9) {
-               dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+               dev_warn(rdev->dev, "VM page table size (%d) too small\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
 
        if (radeon_vm_block_size > 24 ||
-           radeon_vm_size < (1ull << radeon_vm_block_size)) {
-               dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+           (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+               dev_warn(rdev->dev, "VM page table size (%d) too large\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
        /* Adjust VM size here.
         * Max GPUVM size for cayman+ is 40 bits.
         */
-       rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+       rdev->vm_manager.max_pfn = radeon_vm_size << 18;
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
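
The shift change above is the unit conversion from megabytes to gigabytes: max_pfn counts 4 KiB page frames, and 1 GiB = 2^30 bytes / 2^12 bytes per page = 2^18 pages, hence "<< 18". A tiny check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int vm_size_gib = 4;           /* module default after this patch */
        unsigned long long max_pfn = (unsigned long long)vm_size_gib << 18;

        /* 1 GiB = 2^30 bytes, a GPU page is 4 KiB = 2^12 bytes -> 2^18 pages */
        printf("%u GiB -> %llu page frames\n", vm_size_gib, max_pfn);
        return 0;
}
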
index cb1421369e3a2eaa111a8c6d5f3b1e248e72beec..e9e361084249c7deddd0dd680ced4d95a845d31d 100644 (file)
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
 int radeon_vm_block_size = 9;
 int radeon_deep_color = 0;
 
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
index 35d931881b4b80bb5f6989580e993af8796e7041..d25ae6acfd5a05d3bc3ab9775cc3dc604f27554a 100644 (file)
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN) {
                struct radeon_fpriv *fpriv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm;
                int r;
 
                fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               r = radeon_vm_init(rdev, &fpriv->vm);
+               vm = &fpriv->vm;
+               r = radeon_vm_init(rdev, vm);
                if (r) {
                        kfree(fpriv);
                        return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
 
                        /* map the ib pool buffer read only into
                         * virtual address space */
-                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                                rdev->ring_tmp_bo.bo);
-                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                       vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+                                                       rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+                                                 RADEON_VA_IB_OFFSET,
                                                  RADEON_VM_PAGE_READABLE |
                                                  RADEON_VM_PAGE_SNOOPED);
 
                        radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
                struct radeon_fpriv *fpriv = file_priv->driver_priv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm = &fpriv->vm;
                int r;
 
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (!r) {
-                               bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                         rdev->ring_tmp_bo.bo);
-                               if (bo_va)
-                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               if (vm->ib_bo_va)
+                                       radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
                }
 
-               radeon_vm_fini(rdev, &fpriv->vm);
+               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
index eecff6bbd34145c6bf975ca0f2f19ff9af8534ba..725d3669014f8ef2bbd61f637d5e3e98de2772eb 100644 (file)
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);
+       INIT_LIST_HEAD(&bo_va->vm_status);
 
        mutex_lock(&vm->mutex);
        list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                head = &tmp->vm_list;
        }
 
+       if (bo_va->soffset) {
+               /* add a clone of the bo_va to clear the old address */
+               tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+               if (!tmp) {
+                       mutex_unlock(&vm->mutex);
+                       return -ENOMEM;
+               }
+               tmp->soffset = bo_va->soffset;
+               tmp->eoffset = bo_va->eoffset;
+               tmp->vm = vm;
+               list_add(&tmp->vm_status, &vm->freed);
+       }
+
        bo_va->soffset = soffset;
        bo_va->eoffset = eoffset;
        bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
  * Object have to be reserved and mutex must be locked!
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem)
 {
+       struct radeon_vm *vm = bo_va->vm;
        struct radeon_ib ib;
-       struct radeon_bo_va *bo_va;
        unsigned nptes, ndw;
        uint64_t addr;
        int r;
 
-       bo_va = radeon_vm_bo_find(vm, bo);
-       if (bo_va == NULL) {
-               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-               return -EINVAL;
-       }
 
        if (!bo_va->soffset) {
                dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
-                       bo, vm);
+                       bo_va->bo, vm);
                return -EINVAL;
        }
 
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
        trace_radeon_vm_bo_update(bo_va);
 
-       nptes = radeon_bo_ngpu_pages(bo);
+       nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
 
        /* padding, etc. */
        ndw = 64;
@@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int r;
+
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+               list_del(&bo_va->vm_status);
+               r = radeon_vm_bo_update(rdev, bo_va, NULL);
+               kfree(bo_va);
+               if (r)
+                       return r;
+       }
+       return 0;
+}
+
 /**
  * radeon_vm_bo_rmv - remove a bo to a specific vm
  *
@@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
  * @bo_va: requested bo_va
  *
  * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
  *
  * Object have to be reserved!
  */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va)
 {
-       int r = 0;
+       struct radeon_vm *vm = bo_va->vm;
 
-       mutex_lock(&bo_va->vm->mutex);
-       if (bo_va->soffset)
-               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+       list_del(&bo_va->bo_list);
 
+       mutex_lock(&vm->mutex);
        list_del(&bo_va->vm_list);
-       mutex_unlock(&bo_va->vm->mutex);
-       list_del(&bo_va->bo_list);
 
-       kfree(bo_va);
-       return r;
+       if (bo_va->soffset) {
+               bo_va->bo = NULL;
+               list_add(&bo_va->vm_status, &vm->freed);
+       } else {
+               kfree(bo_va);
+       }
+
+       mutex_unlock(&vm->mutex);
 }
 
 /**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        int r;
 
        vm->id = 0;
+       vm->ib_bo_va = NULL;
        vm->fence = NULL;
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->va);
+       INIT_LIST_HEAD(&vm->freed);
 
        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                        kfree(bo_va);
                }
        }
-
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+               kfree(bo_va);
 
        for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
                radeon_bo_unref(&vm->page_tables[i].bo);
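
The new freed list above turns unmapping into a two-phase operation: radeon_vm_bo_rmv() only queues the old range, and radeon_vm_clear_freed() later clears the page-table entries and releases the bookkeeping. A simplified sketch of that deferred-cleanup shape, using a plain singly linked list and a callback standing in for radeon_vm_bo_update(..., NULL):

#include <stdlib.h>

struct mapping {                        /* hypothetical stand-in for radeon_bo_va */
        struct mapping *next;
        unsigned long soffset, eoffset;
};

int clear_freed(struct mapping **freed,
                int (*clear_ptes)(unsigned long start, unsigned long end))
{
        while (*freed) {
                struct mapping *m = *freed;
                int r;

                *freed = m->next;       /* unlink, as list_del() does above */
                r = clear_ptes(m->soffset, m->eoffset);
                free(m);
                if (r)
                        return r;       /* the rest stays queued for a retry */
        }
        return 0;
}
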
index eba0225259a457a3d494f39bfee13af0cdc865e6..9e854fd016dabac99c081896209ce9f732dc34f0 100644 (file)
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 20da6ff183df9b6fb5b59554616a30f28aeeb96f..32e50be9c4ac1c41969fd9de2d1a6a3439a6ef27 100644 (file)
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       /* There are stability issues reported on latops with
-        * bapm installed when switching between AC and battery
-        * power.  At the same time, some desktop boards hang
-        * if it's not enabled and dpm is enabled.
+       /* There are stability issues reported with
+        * bapm enabled when switching between AC and battery
+        * power.  At the same time, some MSI boards hang
+        * if it's not enabled and dpm is enabled.  Just enable
+        * it for MSI boards right now.
         */
-       if (rdev->flags & RADEON_IS_MOBILITY)
-               pi->enable_bapm = false;
-       else
+       if (rdev->pdev->subsystem_vendor == 0x1462)
                pi->enable_bapm = true;
+       else
+               pi->enable_bapm = false;
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
index efee4c59239fcff8aa7b675c01cb5b9ac6bab5bd..34b9a601ad078c394513e67a5dccc345c6c02fef 100644 (file)
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+       return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
+       if (val > 255)
+               return -EINVAL;
 
        data->vrm = val;
        return count;
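
The TEMP_TO_REG() fix above matters because the input is in millidegrees and the return type is s8: the value has to be clamped before it is scaled down, otherwise out-of-range inputs sail through the (now pointless) clamp and overflow the register. A worked example with stand-in macros (rounding only correct for positive inputs):

#include <stdio.h>

/* stand-ins for the driver's clamp_val()/SCALE() */
#define CLAMP(x, lo, hi)   ((x) < (lo) ? (lo) : (x) > (hi) ? (hi) : (x))
#define SCALE(x, mul, div) (((x) * (mul) + (div) / 2) / (div))

int main(void)
{
        int val = 200000;               /* 200 degrees C in millidegrees */
        signed char old_reg = CLAMP(SCALE(val, 1, 1000), -128000, 127000);
        signed char new_reg = SCALE(CLAMP(val, -128000, 127000), 1, 1000);

        /* old path: 200 no longer fits an s8 and typically wraps to -56;
         * new path: input clamps to 127000, scales to 127, fits the register */
        printf("old=%d new=%d\n", old_reg, new_reg);
        return 0;
}
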
index 8fb46aab2d87d5bf85ddc2a1b68ca38341bcb058..a04c49f2a0118a887e22c16b9716656af6e87723 100644 (file)
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
 
 config BLK_DEV_CS5520
        tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
 
 config BLK_DEV_CS5530
        tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
 
 config BLK_DEV_CS5535
        tristate "AMD CS5535 chipset support"
-       depends on X86 && !X86_64
+       depends on X86_32
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
 
 config BLK_DEV_SC1200
        tristate "National SCx200 chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          This driver adds support for the on-board IDE controller on the
index 2a744a91370e640d1fe3603409b80e776d8041de..a3d3b1733c49c667f7f1495d9dfef23623e28fa0 100644 (file)
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
        if (irq_handler == NULL)
                irq_handler = ide_intr;
 
-       if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
-               goto out_up;
+       if (!host->get_lock)
+               if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+                       goto out_up;
 
 #if !defined(__mc68000__)
        printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
 
        ide_proc_unregister_port(hwif);
 
-       free_irq(hwif->irq, hwif);
+       if (!hwif->host->get_lock)
+               free_irq(hwif->irq, hwif);
 
        device_unregister(hwif->portdev);
        device_unregister(&hwif->gendev);
index 1c4c0db055509cc45df2e1865a5c0b6d86e25fc9..29ca0bb4f561e6af3960218d027bfd4ac6b300fa 100644 (file)
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
 }
 
 static int input_get_disposition(struct input_dev *dev,
-                         unsigned int type, unsigned int code, int value)
+                         unsigned int type, unsigned int code, int *pval)
 {
        int disposition = INPUT_IGNORE_EVENT;
+       int value = *pval;
 
        switch (type) {
 
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
                break;
        }
 
+       *pval = value;
        return disposition;
 }
 
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
 {
        int disposition;
 
-       disposition = input_get_disposition(dev, type, code, value);
+       disposition = input_get_disposition(dev, type, code, &value);
 
        if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
                dev->event(dev, type, code, value);
index 758b48731415a26bceff0fdb81ddc08cec0cc2c7..de7be4f03d9193884d248d506b6f75a306dbc17a 100644 (file)
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int keyscan_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
        mutex_unlock(&input->mutex);
        return retval;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
 
index e4104f9b2e6d7efb34e9e60812fdb154c90342f4..fed5102e1802075b6fd9318c2bd8aad044b317b0 100644 (file)
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
 
 module_platform_driver(sirfsoc_pwrc_driver);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
 MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
 MODULE_ALIAS("platform:sirfsoc-pwrc");
index ec772d962f06800fc92915fa27d51b25fca1e56f..ef9e0b8a9aa754b6b49e26b7b050c2a7bcf1f078 100644 (file)
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                1232, 5710, 1156, 4696
        },
        {
-               (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+               (const char * const []){"LEN0034", "LEN0036", "LEN2002",
+                                       "LEN2004", NULL},
                1024, 5112, 2024, 4832
        },
        {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0049",
        "LEN2000",
        "LEN2001", /* Edge E431 */
-       "LEN2002",
+       "LEN2002", /* Edge E531 */
        "LEN2003",
        "LEN2004", /* L440 */
        "LEN2005",
index 381b20d4c5618d8fc7f5c3e616479d4663d9ae03..136b7b204f56c8d111812559384d6acd8e732514 100644 (file)
@@ -401,6 +401,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
                },
        },
+       {
+               /* Acer Aspire 5710 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+               },
+       },
        {
                /* Gericom Bellagio */
                .matches = {
index 977d05cd9e2ea707c08c5eb6d3c9a4fd1a07adae..e73cf2c71f355993c564cee0a05b7cbd34e5f435 100644 (file)
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
                         * a=(pi*r^2)/C.
                         */
                        int a = data[5];
-                       int x_res  = input_abs_get_res(input, ABS_X);
-                       int y_res  = input_abs_get_res(input, ABS_Y);
-                       width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+                       int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+                       int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+                       width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
                        height = width * y_res / x_res;
                }
 
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
                input_abs_set_res(input_dev, ABS_X, features->x_resolution);
                input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
        } else {
-               if (features->touch_max <= 2) {
+               if (features->touch_max == 1) {
                        input_set_abs_params(input_dev, ABS_X, 0,
                                features->x_max, features->x_fuzz, 0);
                        input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
        case MTTPC:
        case MTTPC_B:
        case TABLETPC2FG:
-               if (features->device_type == BTN_TOOL_FINGER) {
-                       unsigned int flags = INPUT_MT_DIRECT;
-
-                       if (wacom_wac->features.type == TABLETPC2FG)
-                               flags = 0;
-
-                       input_mt_init_slots(input_dev, features->touch_max, flags);
-               }
+               if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+                       input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
                /* fall through */
 
        case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                        __set_bit(BTN_RIGHT, input_dev->keybit);
 
                        if (features->touch_max) {
-                               /* touch interface */
-                               unsigned int flags = INPUT_MT_POINTER;
-
-                               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                                if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MINOR,
                                                     0, features->y_max, 0, 0);
-                               } else {
-                                       __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-                                       __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-                                       flags = 0;
                                }
-                               input_mt_init_slots(input_dev, features->touch_max, flags);
+                               input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
                        } else {
                                /* buttons/keys only interface */
                                __clear_bit(ABS_X, input_dev->absbit);
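
The contact-size math in wacom_bpt3_touch_msg() treats the reported byte as an area, a = pi*r^2 over a scale constant, so with that constant folded in the diameter is 2*sqrt(a*SCALE); the fix above reads the resolution from the MT axes so the Y extent gets the right aspect correction. A sketch with illustrative numbers (the scale here merely stands in for WACOM_CONTACT_AREA_SCALE, and the resolutions are made up):

#include <math.h>
#include <stdio.h>

int main(void)
{
        int a = 60;             /* raw size value from the touch packet */
        int scale = 2607;       /* illustrative stand-in for WACOM_CONTACT_AREA_SCALE */
        int x_res = 100;        /* units/mm of ABS_MT_POSITION_X */
        int y_res = 200;        /* units/mm of ABS_MT_POSITION_Y */

        int width  = 2 * (int)sqrt((double)a * scale);  /* int_sqrt() equivalent */
        int height = width * y_res / x_res;             /* aspect-corrected */

        printf("width=%d height=%d\n", width, height);
        return 0;
}
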
index 4e793a17361f7b2e02955e7e6e02ece9151f8063..2ce649520fe0d67021509b7e829cf1dbb9858e19 100644 (file)
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
         */
        err = of_property_read_u32(node, "ti,coordinate-readouts",
                        &ts_dev->coordinate_readouts);
-       if (err < 0)
+       if (err < 0) {
+               dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
                err = of_property_read_u32(node, "ti,coordiante-readouts",
                                &ts_dev->coordinate_readouts);
+       }
+
        if (err < 0)
                return err;
 
index a333b7f798d17de3f4f7bc2a610b7bceaa7c6307..62f0688d45a500530fd65014432be9fbfd5a7d69 100644 (file)
@@ -638,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->pass_filter)
+               if (is->pass_filter) {
                        sk_unattached_filter_destroy(is->pass_filter);
-               err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+                       is->pass_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = sk_unattached_filter_create(&is->pass_filter,
+                                                         &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
@@ -657,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->active_filter)
+               if (is->active_filter) {
                        sk_unattached_filter_destroy(is->active_filter);
-               err = sk_unattached_filter_create(&is->active_filter, &fprog);
+                       is->active_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = sk_unattached_filter_create(&is->active_filter,
+                                                         &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
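
Both filter ioctls above get the same treatment as the ppp_generic.c change further down: always destroy the old filter and clear the pointer, and only create a replacement when a program was actually supplied, so an empty program now means "remove the filter" rather than handing a zero-length program to the BPF core. A self-contained sketch of that replace pattern, with hypothetical helpers instead of the sk_unattached_filter API:

#include <stdlib.h>
#include <string.h>

struct prog { int len; void *insns; };  /* hypothetical, not struct sk_filter */

void filter_destroy(struct prog *p)
{
        free(p->insns);
        free(p);
}

int filter_create(struct prog **slot, const void *insns, int len)
{
        struct prog *p = malloc(sizeof(*p));

        if (!p || !(p->insns = malloc(len))) {
                free(p);
                return -1;
        }
        memcpy(p->insns, insns, len);
        p->len = len;
        *slot = p;
        return 0;
}

/* the point of the fix: tear down and clear first, create only when a
 * program was actually supplied */
int replace_filter(struct prog **slot, const void *insns, int len)
{
        if (*slot) {
                filter_destroy(*slot);
                *slot = NULL;
        }
        if (insns != NULL && len > 0)
                return filter_create(slot, insns, len);
        return 0;
}
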
index 8637d2ed76238b1c76c95d6b6bf2f994f19a3562..2e3cdcfa0a675b2f8835fde25924d262f6ed6264 100644 (file)
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
                                jiffies_to_msecs(jiffies) -
                                (jiffies_to_msecs(timeout) - TIMEOUT));
 
-               if (!(cmd->args[0] >> 7) & 0x01) {
+               if (!((cmd->args[0] >> 7) & 0x01)) {
                        ret = -ETIMEDOUT;
                        goto err_mutex_unlock;
                }
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
        if (ret)
                goto err;
 
-       cmd.args[0] = 0x05;
-       cmd.args[1] = 0x00;
-       cmd.args[2] = 0xaa;
-       cmd.args[3] = 0x4d;
-       cmd.args[4] = 0x56;
-       cmd.args[5] = 0x40;
-       cmd.args[6] = 0x00;
-       cmd.args[7] = 0x00;
-       cmd.wlen = 8;
-       cmd.rlen = 1;
-       ret = si2168_cmd_execute(s, &cmd);
-       if (ret)
-               goto err;
-
        /* cold state - try to download firmware */
        dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
                        KBUILD_MODNAME, si2168_ops.info.name);
index 2a343e896f4038d66f62ba255886a09add151b16..53f7f06ae3437aca8981f8d63f8e6538f7a77978 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/i2c-mux.h>
 
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
 
 /* state struct */
 struct si2168 {
index 522fe00f5eee167f6fa59a657aaad1b4dce55783..9619be5d48271827b28052c080f01fc5631224b7 100644 (file)
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int ret, i;
        u8 mode, rolloff, pilot, inversion, div;
+       fe_modulation_t modulation;
 
        dev_dbg(&priv->i2c->dev,
                        "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        switch (c->delivery_system) {
        case SYS_DVBS:
+               modulation = QPSK;
                rolloff = 0;
                pilot = 2;
                break;
        case SYS_DVBS2:
+               modulation = c->modulation;
+
                switch (c->rolloff) {
                case ROLLOFF_20:
                        rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
                if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
-                       c->modulation == TDA10071_MODCOD[i].modulation &&
+                       modulation == TDA10071_MODCOD[i].modulation &&
                        c->fec_inner == TDA10071_MODCOD[i].fec) {
                        mode = TDA10071_MODCOD[i].val;
                        dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
 
        switch ((buf[1] >> 0) & 0x01) {
        case 0:
-               c->inversion = INVERSION_OFF;
+               c->inversion = INVERSION_ON;
                break;
        case 1:
-               c->inversion = INVERSION_ON;
+               c->inversion = INVERSION_OFF;
                break;
        }
 
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
        if (ret)
                goto error;
 
-       c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+       c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
 
        return ret;
 error:
index 4baf14bfb65a6d6ca4a485861492efcdc955f006..42048619273682089808eb1bffd1690704f7d13a 100644 (file)
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
        { SYS_DVBS2, QPSK,  FEC_8_9,  0x0a },
        { SYS_DVBS2, QPSK,  FEC_9_10, 0x0b },
        /* 8PSK */
+       { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
        { SYS_DVBS2, PSK_8, FEC_3_5,  0x0c },
        { SYS_DVBS2, PSK_8, FEC_2_3,  0x0d },
        { SYS_DVBS2, PSK_8, FEC_3_4,  0x0e },
index e65c760e4e8bbc5df3da9d4fa970d003d6e0cc66..0006d6bf8c18a5015dea50cbd4e0386802b80bc6 100644 (file)
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
        .read     = vb2_fop_read,
        .poll     = vb2_fop_poll,
        .mmap     = vb2_fop_mmap,
-       .ioctl    = video_ioctl2,
+       .unlocked_ioctl = video_ioctl2,
 };
 
 static const struct v4l2_ioctl_ops ts_ioctl_ops = {
index a7ed16497903ef171caf128697900aa9700b3915..1e4ec697fb1054c69becb4f4483f228508c2efe7 100644 (file)
@@ -269,6 +269,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
index 5bb085b19bcbdad375ed9bbb52ab13851580127c..b431b58f39e3648d7d33ae6e049f1896ccd3fedc 100644 (file)
@@ -233,6 +233,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
index 271a752cee5415a825fed6af3e2e125d1ebf343a..fa4cc7b880aa4774f4fdf6d7c6cf3ddbb010dbd8 100644 (file)
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
                        jiffies_to_msecs(jiffies) -
                        (jiffies_to_msecs(timeout) - TIMEOUT));
 
-       if (!(buf[0] >> 7) & 0x01) {
+       if (!((buf[0] >> 7) & 0x01)) {
                ret = -ETIMEDOUT;
                goto err_mutex_unlock;
        } else {
index 021e4d35e4d7d5f65311c13bb70c33df07b40d3f..7b9b75f6077471aea8209f317bbd7a636f8e8b58 100644 (file)
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
                if (ret < 0)
                        goto err;
 
-               if (tmp == 0x00)
-                       dev_dbg(&d->udev->dev,
-                                       "%s: [%d]tuner not set, using default\n",
-                                       __func__, i);
-               else
+               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
+                               __func__, i, tmp);
+
+               /* tuner sanity check */
+               if (state->chip_type == 0x9135) {
+                       if (state->chip_version == 0x02) {
+                               /* IT9135 BX (v2) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_60:
+                               case AF9033_TUNER_IT9135_61:
+                               case AF9033_TUNER_IT9135_62:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       } else {
+                               /* IT9135 AX (v1) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_38:
+                               case AF9033_TUNER_IT9135_51:
+                               case AF9033_TUNER_IT9135_52:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       }
+               } else {
+                       /* AF9035 */
                        state->af9033_config[i].tuner = tmp;
+               }
 
-               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
-                               __func__, i, state->af9033_config[i].tuner);
+               if (state->af9033_config[i].tuner != tmp) {
+                       dev_info(&d->udev->dev,
+                                       "%s: [%d] overriding tuner from %02x to %02x\n",
+                                       KBUILD_MODNAME, i, tmp,
+                                       state->af9033_config[i].tuner);
+               }
 
                switch (state->af9033_config[i].tuner) {
                case AF9033_TUNER_TUA9001:
index 2fd1c5e31a0f2692a1d59dd88c72cbee74e61054..339adce7c7a50974ffe9c5a2a185ae26fe8956c6 100644 (file)
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x093a, 0x2620)},
        {USB_DEVICE(0x093a, 0x2621)},
        {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+       {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2625)},
        {USB_DEVICE(0x093a, 0x2626)},
index 0500c4175d5f095d3b27322f298aeb7130ab95e7..6bce01a674f9e12f9874636e962378f6965a4fad 100644 (file)
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
 }
 
 /*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
 
 /* function expects dev->io_mutex to be hold by caller */
 int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_MPEG_AUDIO_ENCODING:
                if (dev->flags & HDPVR_FLAG_AC3_CAP) {
                        opt->audio_codec = ctrl->val;
-                       return hdpvr_set_audio(dev, opt->audio_input,
+                       return hdpvr_set_audio(dev, opt->audio_input + 1,
                                              opt->audio_codec);
                }
                return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_AUDIO_ENCODING,
                ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
-               0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+               0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_VIDEO_ENCODING,
                V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
index 4ae54caadd0375b786bf50147c6499e892790263..ce1c9f5d9dee1040c43f798b5163c4821feabbfd 100644 (file)
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
                aspect.denominator = 9;
        } else if (ratio == 34) {
                aspect.numerator = 4;
-               aspect.numerator = 3;
+               aspect.denominator = 3;
        } else if (ratio == 68) {
                aspect.numerator = 15;
-               aspect.numerator = 9;
+               aspect.denominator = 9;
        } else {
                aspect.numerator = hor_landscape + 99;
                aspect.denominator = 100;
index 14c00048bbec66bf85a3d3ef8127e8fa5f2363bf..82322b1c8411b80ff9d15f8c8022a3095f69c4d0 100644 (file)
@@ -129,14 +129,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                                                  name);
                                }
 
-                               cq->irq_desc =
-                                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
-                                                                   cq->vector));
                        }
                } else {
                        cq->vector = (cq->ring + 1 + priv->port) %
                                mdev->dev->caps.num_comp_vectors;
                }
+
+               cq->irq_desc =
+                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+                                                   cq->vector));
        } else {
                /* For TX we use the same irq per
                ring we assigned for the RX    */
index 06bdc31a828dce2163ca4a8cbd74d9a5cd5e65dd..61623e9af57424b1298c7db02641d78d6154b751 100644 (file)
@@ -4240,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_40:
+               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               break;
        case RTL_GIGA_MAC_VER_41:
        case RTL_GIGA_MAC_VER_42:
        case RTL_GIGA_MAC_VER_43:
index 1c24a8f368bd86a25835053de21ee6f96df78adb..fd411d6e19a2f4e4d37675f1d27bd011584b8fa0 100644 (file)
@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
        return vp;
 }
 
+static void vnet_cleanup(void)
+{
+       struct vnet *vp;
+       struct net_device *dev;
+
+       mutex_lock(&vnet_list_mutex);
+       while (!list_empty(&vnet_list)) {
+               vp = list_first_entry(&vnet_list, struct vnet, list);
+               list_del(&vp->list);
+               dev = vp->dev;
+               /* vio_unregister_driver() should have cleaned up port_list */
+               BUG_ON(!list_empty(&vp->port_list));
+               unregister_netdev(dev);
+               free_netdev(dev);
+       }
+       mutex_unlock(&vnet_list_mutex);
+}
+
 static const char *local_mac_prop = "local-mac-address";
 
 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
 
                kfree(port);
 
-               unregister_netdev(vp->dev);
        }
        return 0;
 }
@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
 static void __exit vnet_exit(void)
 {
        vio_unregister_driver(&vnet_port_driver);
+       vnet_cleanup();
 }
 
 module_init(vnet_init);
index e2f20f807de81a5647623d95f868a635c7cba32a..d5b77ef3a2100c3ce42ad75f4e1c9fe981f046e9 100644 (file)
@@ -757,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->pass_filter)
+                       if (ppp->pass_filter) {
                                sk_unattached_filter_destroy(ppp->pass_filter);
-                       err = sk_unattached_filter_create(&ppp->pass_filter,
-                                                         &fprog);
+                               ppp->pass_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = sk_unattached_filter_create(&ppp->pass_filter,
+                                                                 &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
@@ -778,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->active_filter)
+                       if (ppp->active_filter) {
                                sk_unattached_filter_destroy(ppp->active_filter);
-                       err = sk_unattached_filter_create(&ppp->active_filter,
-                                                         &fprog);
+                               ppp->active_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = sk_unattached_filter_create(&ppp->active_filter,
+                                                                 &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
index 5d95a13dbe2aa5cb44f6f6ff9f8ab26b47d0c418..735f7dadb9a0740dea86ed58d55df1ae58edf9ef 100644 (file)
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
          .driver_info = (unsigned long)&huawei_cdc_ncm_info,
        },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+         .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+       },
 
        /* Terminating entry */
        {
index c4638c67f6b9f8e2a28ae09d12e6fbe74bc735cb..22756db53dcacc3138fd000cad8dbda968948fb8 100644 (file)
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
@@ -757,6 +758,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9054, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9055, 8)},    /* Netgear AirCard 341U */
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
+       {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
index 5895f19786919f6cfb36b97273a95bc1462d1c10..fa9fdfa128c1e6b732c74907fb43504c8ec2ad4c 100644 (file)
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 {
        struct x25_asy *sl = netdev_priv(dev);
        unsigned char *xbuff, *rbuff;
-       int len = 2 * newmtu;
+       int len;
 
+       if (newmtu > 65534)
+               return -EINVAL;
+
+       len = 2 * newmtu;
        xbuff = kmalloc(len + 4, GFP_ATOMIC);
        rbuff = kmalloc(len + 4, GFP_ATOMIC);
 
index 1844a47636b67c821f03fc8a565092ab59749426..c65b636bcab9dfb3bdf3528bd3d0fcd30f7a5812 100644 (file)
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 {
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       /* This always points to the shinfo of the skb being checked, which
+        * could be either the first or the one on the frag_list
+        */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
+       /* If this is non-NULL, we are currently checking the frag_list skb, and
+        * this points to the shinfo of the first one
+        */
+       struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
+       const bool sharedslot = nr_frags &&
+                               frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
        int i, err;
-       struct sk_buff *first_skb = NULL;
 
        /* Check status of header. */
        err = (*gopp_copy)->status;
-       (*gopp_copy)++;
        if (unlikely(err)) {
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                                   (*gopp_copy)->status,
                                   pending_idx,
                                   (*gopp_copy)->source.u.ref);
-               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+               /* The first frag might still have this slot mapped */
+               if (!sharedslot)
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_ERROR);
        }
+       (*gopp_copy)++;
 
 check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
                                                pending_idx,
                                                gop_map->handle);
                        /* Had a previous error? Invalidate this fragment. */
-                       if (unlikely(err))
+                       if (unlikely(err)) {
                                xenvif_idx_unmap(queue, pending_idx);
+                               /* If the mapping of the first frag was OK, but
+                                * the header's copy failed, and they are
+                                * sharing a slot, send an error
+                                */
+                               if (i == 0 && sharedslot)
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_ERROR);
+                               else
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_OKAY);
+                       }
                        continue;
                }
 
@@ -1075,42 +1097,53 @@ check_frags:
                                   gop_map->status,
                                   pending_idx,
                                   gop_map->ref);
+
                xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;
-               /* First error: invalidate preceding fragments. */
+
+               /* First error: if the header hasn't shared a slot with the
+                * first frag, release it as well.
+                */
+               if (!sharedslot)
+                       xenvif_idx_release(queue,
+                                          XENVIF_TX_CB(skb)->pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+
+               /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xenvif_idx_unmap(queue, pending_idx);
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+               }
+
+               /* And if we found the error while checking the frag_list, unmap
+                * the first skb's frags
+                */
+               if (first_shinfo) {
+                       for (j = 0; j < first_shinfo->nr_frags; j++) {
+                               pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+                               xenvif_idx_unmap(queue, pending_idx);
+                               xenvif_idx_release(queue, pending_idx,
+                                                  XEN_NETIF_RSP_OKAY);
+                       }
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }
 
-       if (skb_has_frag_list(skb)) {
-               first_skb = skb;
-               skb = shinfo->frag_list;
-               shinfo = skb_shinfo(skb);
+       if (skb_has_frag_list(skb) && !first_shinfo) {
+               first_shinfo = skb_shinfo(skb);
+               shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
                nr_frags = shinfo->nr_frags;
 
                goto check_frags;
        }
 
-       /* There was a mapping error in the frag_list skb. We have to unmap
-        * the first skb's frags
-        */
-       if (first_skb && err) {
-               int j;
-               shinfo = skb_shinfo(first_skb);
-               for (j = 0; j < shinfo->nr_frags; j++) {
-                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xenvif_idx_unmap(queue, pending_idx);
-               }
-       }
-
        *gopp_map = gop_map;
        return err;
 }
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 
                /* Check the remap error code. */
                if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+                       /* If there was an error, xenvif_tx_check_gop is
+                        * expected to release all the frags which were mapped,
+                        * so kfree_skb shouldn't do it again
+                        */
                        skb_shinfo(skb)->nr_frags = 0;
+                       if (skb_has_frag_list(skb)) {
+                               struct sk_buff *nskb =
+                                               skb_shinfo(skb)->frag_list;
+                               skb_shinfo(nskb)->nr_frags = 0;
+                       }
                        kfree_skb(skb);
                        continue;
                }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
                           tx_unmap_op.status);
                BUG();
        }
-
-       xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
 }
 
 static inline int rx_work_todo(struct xenvif_queue *queue)
index 2872ece81f358df7ff139c143e86d58031485b14..44333bd8f90886260e2219acad391dadaa9fd095 100644 (file)
@@ -5,6 +5,12 @@
 # Parport configuration.
 #
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+       bool
+       help
+         Select this config option from the architecture Kconfig if
+         the architecture might have PC parallel port hardware.
+
 menuconfig PARPORT
        tristate "Parallel port support"
        depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
 
          If unsure, say Y.
 
-config ARCH_MIGHT_HAVE_PC_PARPORT
-       bool
-       help
-         Select this config option from the architecture Kconfig if
-         the architecture might have PC parallel port hardware.
-
 if PARPORT
 
 config PARPORT_PC
index 1bd6363bc95ef2b0d0bd2b58d733159941c78fa0..9f43916637ca251372fb57efc32bdd82dde4eef1 100644 (file)
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
 
        status = readl(info->irqmux_base);
 
-       for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+       for_each_set_bit(n, &status, info->nbanks)
                __gpio_irq_handler(&info->banks[n]);
 
        chained_irq_exit(chip, desc);
index 15b3459f86562d2211dde73e507197bbea04b50e..220acb4cbee520c6cfed5c78fdf49fb776f6bf99 100644 (file)
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
        } else
                raw3270_writesf_readpart(rp);
        memset(&rp->init_reset, 0, sizeof(rp->init_reset));
-       memset(&rp->init_data, 0, sizeof(rp->init_data));
 }
 
 static int
index 69ef4f8cfac8c14366c9803eec4ed6877accf786..4038437ff033f892d30c746056bbb16d12e40c5a 100644 (file)
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
        int rc;
 
        ap_dev->drv = ap_drv;
+
+       spin_lock_bh(&ap_device_list_lock);
+       list_add(&ap_dev->list, &ap_device_list);
+       spin_unlock_bh(&ap_device_list_lock);
+
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
-       if (!rc) {
+       if (rc) {
                spin_lock_bh(&ap_device_list_lock);
-               list_add(&ap_dev->list, &ap_device_list);
+               list_del_init(&ap_dev->list);
                spin_unlock_bh(&ap_device_list_lock);
        }
        return rc;
index 78b0fba7047e98d37ef4025690346bd93936d3fa..8afc6fee40c547ccca5f5c106e651c3fbfce9942 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_OMAP4
        bool "OMAP 4 Camera support"
-       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+       depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
        select VIDEOBUF2_DMA_CONTIG
        ---help---
          Driver for an OMAP 4 ISS controller.
index 0b2528fb640e77e4a38a351c51f8a01f12102c14..a93f7e6ea4cf935aea64e8266b221a2c53477628 100644 (file)
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        if (unlikely(nr < 0))
                return nr;
 
-       tsk->flags = PF_DUMPCORE;
+       tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
index 98040ba388ac1e2db62f96f253bc141758013b10..194d0d122cae566b8e82c1a1bd3188499a467796 100644 (file)
@@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
  * L1 cache.
  */
 static inline struct page *dio_get_page(struct dio *dio,
-               struct dio_submit *sdio, size_t *from, size_t *to)
+                                       struct dio_submit *sdio)
 {
-       int n;
        if (dio_pages_present(sdio) == 0) {
                int ret;
 
@@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio,
                        return ERR_PTR(ret);
                BUG_ON(dio_pages_present(sdio) == 0);
        }
-       n = sdio->head++;
-       *from = n ? 0 : sdio->from;
-       *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
-       return dio->pages[n];
+       return dio->pages[sdio->head];
 }
 
 /**
@@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
        while (sdio->block_in_file < sdio->final_block_in_request) {
                struct page *page;
                size_t from, to;
-               page = dio_get_page(dio, sdio, &from, &to);
+
+               page = dio_get_page(dio, sdio);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
+               from = sdio->head ? 0 : sdio->from;
+               to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+               sdio->head++;
 
                while (from < to) {
                        unsigned this_chunk_bytes;      /* # of bytes mapped */
index 8474028d7848064912b34df229b893c39f6144e9..03246cd9d47a7b27d93ce0c2a122cae89d5fbd4f 100644 (file)
@@ -907,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->writeback_cache = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
-                       else
-                               fc->sb->s_time_gran = 1000000000;
-
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
@@ -938,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-               FUSE_WRITEBACK_CACHE;
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
index 985c6f3684859e17439d62c7d319611305414437..9eb787e5c167fb601845590f0181d249bf515fb0 100644 (file)
@@ -2256,9 +2256,10 @@ done:
                goto out;
        }
        path->dentry = dentry;
-       path->mnt = mntget(nd->path.mnt);
+       path->mnt = nd->path.mnt;
        if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
                return 1;
+       mntget(path->mnt);
        follow_mount(path);
        error = 0;
 out:
index b56b1cc0271853b566f83f4157eaa753fd8ad2e3..944275c8f56ddf79ec457f7ad5d916f6ad7c5982 100644 (file)
@@ -2879,6 +2879,7 @@ again:
                 * return the conflicting open:
                 */
                if (conf->len) {
+                       kfree(conf->data);
                        conf->len = 0;
                        conf->data = NULL;
                        goto again;
@@ -2891,6 +2892,7 @@ again:
        if (conf->len) {
                p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
                p = xdr_encode_opaque(p, conf->data, conf->len);
+               kfree(conf->data);
        }  else {  /* non - nfsv4 lock in conflict, no clientid nor owner */
                p = xdr_encode_hyper(p, (u64)0); /* clientid */
                *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
                nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
        else if (nfserr == nfserr_denied)
                nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-       kfree(lock->lk_denied.ld_owner.data);
+
        return nfserr;
 }
 
index 3377dff184042044547d42e9b123944a0ad7e96a..c69e6d43a0d2e863c3e2d6680731371e0d7ef284 100644 (file)
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
 
        /* wrap around? */
        len = sizeof(*new_xattr) + size;
-       if (len <= sizeof(*new_xattr))
+       if (len < sizeof(*new_xattr))
                return NULL;
 
        new_xattr = kmalloc(len, GFP_KERNEL);
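
The one-character change above ('<=' becomes '<') keeps the wrap-around guard in
simple_xattr_alloc() while no longer rejecting zero-length values, for which len is
exactly sizeof(*new_xattr). A minimal userspace sketch of the same idiom, with made-up
names, showing which sizes each comparison lets through:

    #include <stdint.h>
    #include <stdio.h>

    struct hdr { uint64_t meta; };                  /* stand-in for the real header */

    /* Reject only sizes that wrap the addition; size == 0 stays legal. */
    static int len_ok(size_t size)
    {
            size_t len = sizeof(struct hdr) + size;

            return len >= sizeof(struct hdr);       /* i.e. !(len < sizeof(struct hdr)) */
    }

    int main(void)
    {
            printf("size 0        -> %d\n", len_ok(0));             /* 1: allowed now */
            printf("size SIZE_MAX -> %d\n", len_ok(SIZE_MAX));      /* 0: wraps, rejected */
            return 0;
    }
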
index 5ab4e3a76721760e4d5a70f88de282a687858990..92abb497ab14bb50142652ead5dcf3f630556e3f 100644 (file)
@@ -593,6 +593,7 @@ struct ata_host {
        struct device           *dev;
        void __iomem * const    *iomap;
        unsigned int            n_ports;
+       unsigned int            n_tags;                 /* nr of NCQ tags */
        void                    *private_data;
        struct ata_port_operations *ops;
        unsigned long           flags;
index 0a97b583ee8d12ae696cd3d33a13ee17c62b518f..e1474ae18c8847cba4a2f17c396c6e7b59167fed 100644 (file)
@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
        return read_cache_page(mapping, index, filler, data);
 }
 
+/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+       if (unlikely(PageHeadHuge(page)))
+               return page->index << compound_order(page);
+       else
+               return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
index 713b0b88bd5a4cb7e0820dc16bb60bfabe0cb945..c4d86198d3d6542088ed6db8d4daed2adf191806 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
 #include <net/netlink.h>
 
 #define NFT_JUMP_STACK_SIZE    16
@@ -528,8 +529,9 @@ enum nft_chain_type {
 };
 
 struct nft_stats {
-       u64 bytes;
-       u64 pkts;
+       u64                     bytes;
+       u64                     pkts;
+       struct u64_stats_sync   syncp;
 };
 
 #define NFT_HOOK_OPS_MAX               2
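
The new syncp member is what lets the 64-bit counters be read consistently on 32-bit
hosts; the matching reader loop appears in the nft_dump_stats() hunk further down. A
hedged sketch of the per-cpu writer side this layout implies (the function name is
invented here, not taken from the merge):

    /* Illustrative writer path for the per-cpu nft_stats layout above. */
    static inline void nft_stats_account(struct nft_stats __percpu *stats,
                                         unsigned int len)
    {
            struct nft_stats *this;

            local_bh_disable();                     /* keep the softirq writer exclusive */
            this = this_cpu_ptr(stats);
            u64_stats_update_begin(&this->syncp);   /* open the write section for readers */
            this->pkts++;
            this->bytes += len;
            u64_stats_update_end(&this->syncp);
            local_bh_enable();
    }
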
index 26a394cb91a8fde2bb5a3f8716d25e073e4b1862..eee608b12cc95f3267a00eed85467f94f761d298 100644 (file)
@@ -13,8 +13,8 @@ struct netns_nftables {
        struct nft_af_info      *inet;
        struct nft_af_info      *arp;
        struct nft_af_info      *bridge;
+       unsigned int            base_seq;
        u8                      gencursor;
-       u8                      genctr;
 };
 
 #endif
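
base_seq replaces the old genctr as a dump-generation counter: the nf_tables_api.c hunks
below copy it into cb->seq and call nl_dump_check_consistent(), so RCU dumps that race
with an update get NLM_F_DUMP_INTR set. A sketch of the commit-side bump this relies on
(reduced and renamed; not the literal code from the merge):

    /* Illustrative commit-side counterpart: advance the generation counter under
     * the nfnl mutex so dumps started before this point see a cb->seq mismatch. */
    static void nft_advance_genid_sketch(struct net *net)
    {
            net->nft.base_seq++;
    }
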
index 40b5ca8a1b1f3028e5e03c5b3372f98e39437422..25084a052a1eff964d19a9683d5d6470590e4c7e 100644 (file)
  *  - add FATTR_CTIME
  *  - add ctime and ctimensec to fuse_setattr_in
  *  - add FUSE_RENAME2 request
+ *  - add FUSE_NO_OPEN_SUPPORT flag
  */
 
 #ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
  * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
 #define FUSE_READDIRPLUS_AUTO  (1 << 14)
 #define FUSE_ASYNC_DIO         (1 << 15)
 #define FUSE_WRITEBACK_CACHE   (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT   (1 << 17)
 
 /**
  * CUSE INIT request/reply flags
index bda9621638ccca1a08e6ff24df9dc486464d256a..291397e66669d4d2f9f4090ce040b94f8e7187fe 100644 (file)
@@ -823,7 +823,7 @@ static struct {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
-       { trace_clock_jiffies,  "uptime",       1 },
+       { trace_clock_jiffies,  "uptime",       0 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
 };
index 26dc348332b798eeb43a77cf2d89357512d9e8c0..57b67b1f24d1a141f88163c385e62be25cd275cf 100644 (file)
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
 
 /*
  * trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and the effect if
+ * we are affected is that we will have an obviously bogus
+ * timestamp on a trace event - i.e. not life threatening.
  */
 u64 notrace trace_clock_jiffies(void)
 {
-       u64 jiffy = jiffies - INITIAL_JIFFIES;
-
-       /* Return nsecs */
-       return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+       return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
 }
 
 /*
index 2024bbd573d2a9ca8a08842cdf0b99d2062cbee1..9221c02ed9e2b8ab495e0b39482e2bcfa5ba9957 100644 (file)
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
+                       entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        page_dup_rmap(ptepage);
index c6399e32893178b835457388371e8e4f85512361..7211a73ba14d16155c1514c078b1e3c9d25e0b22 100644 (file)
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        if (av == NULL) /* Not actually mapped anymore */
                return;
 
-       pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff = page_to_pgoff(page);
        read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        mutex_lock(&mapping->i_mmap_mutex);
        read_lock(&tasklist_lock);
        for_each_process(tsk) {
-               pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+               pgoff_t pgoff = page_to_pgoff(page);
                struct task_struct *t = task_early_kill(tsk, force_early);
 
                if (!t)
index d67fd9fcf1f2e11d8b77c513113475df125ad99d..7e8d8205b6108fcf5b0a97aad3da36c9455b230f 100644 (file)
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * if page by the offset is not ready to be mapped (cold cache or
         * something).
         */
-       if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+       if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+           fault_around_pages() > 1) {
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
                do_fault_around(vma, address, pte, pgoff, flags);
                if (!pte_same(*pte, orig_pte))
index 9e0beaa918454abbcd63e94ee6cefb5f108f751f..be6dbf995c0cea7128fa58124057d8891cfa7933 100644 (file)
@@ -988,9 +988,10 @@ out:
         * it.  Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+               ClearPageSwapBacked(newpage);
                put_new_page(newpage, private);
-       else
+       } else
                putback_lru_page(newpage);
 
        if (result) {
index b7e94ebbd09e88c3b356e36fe89ed72b89e14474..22a4a7699cdbeb51e86c22ebbd4b1118693042f9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-       if (unlikely(is_vm_hugetlb_page(vma)))
-               pgoff = page->index << huge_page_order(page_hstate(page));
-
+       pgoff_t pgoff = page_to_pgoff(page);
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
        struct anon_vma *anon_vma;
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
        struct address_space *mapping = page->mapping;
-       pgoff_t pgoff = page->index << compound_order(page);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
 
index 1140f49b6ded6f7a72d89d2e89f9fce0df1a940d..af68b15a8fc1f99ede5cf82a38aecfc6b3b6eda6 100644 (file)
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-       int     mode;           /* FALLOC_FL mode currently operating */
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                return;
 
        index = start;
-       for ( ; ; ) {
+       while (index < end) {
                cond_resched();
 
                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start || unfalloc)
+                       /* If all gone or hole-punch or unfalloc, we're done */
+                       if (index == start || end != -1)
                                break;
+                       /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
-               if ((index == start || unfalloc) && indices[0] >= end) {
-                       pagevec_remove_exceptionals(&pvec);
-                       pagevec_release(&pvec);
-                       break;
-               }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
-                                                               index, page);
+                               if (shmem_free_swap(mapping, index, page)) {
+                                       /* Swap was replaced by page: retry */
+                                       index--;
+                                       break;
+                               }
+                               nr_swaps_freed++;
                                continue;
                        }
 
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
+                               } else {
+                                       /* Page was replaced by swap: retry */
+                                       unlock_page(page);
+                                       index--;
+                                       break;
                                }
                        }
                        unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
-                           !shmem_falloc->mode &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
         * locks writers out with its hold on i_mutex.  So refrain from
-        * faulting pages into the hole while it's being punched, and
-        * wait on i_mutex to be released if vmf->flags permits.
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
                struct shmem_falloc *shmem_falloc;
 
                spin_lock(&inode->i_lock);
                shmem_falloc = inode->i_private;
-               if (!shmem_falloc ||
-                   shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-                   vmf->pgoff < shmem_falloc->start ||
-                   vmf->pgoff >= shmem_falloc->next)
-                       shmem_falloc = NULL;
-               spin_unlock(&inode->i_lock);
-               /*
-                * i_lock has protected us from taking shmem_falloc seriously
-                * once return from shmem_fallocate() went back up that stack.
-                * i_lock does not serialize with i_mutex at all, but it does
-                * not matter if sometimes we wait unnecessarily, or sometimes
-                * miss out on waiting: we just need to make those cases rare.
-                */
-               if (shmem_falloc) {
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
                        if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
                           !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
                                up_read(&vma->vm_mm->mmap_sem);
-                               mutex_lock(&inode->i_mutex);
-                               mutex_unlock(&inode->i_mutex);
-                               return VM_FAULT_RETRY;
+                               ret = VM_FAULT_RETRY;
                        }
-                       /* cond_resched? Leave that to GUP or return to user */
-                       return VM_FAULT_NOPAGE;
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock needed lest racing with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
                }
+               spin_unlock(&inode->i_lock);
        }
 
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 
        mutex_lock(&inode->i_mutex);
 
-       shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
+               shmem_falloc.waitq = &shmem_falloc_waitq;
                shmem_falloc.start = unmap_start >> PAGE_SHIFT;
                shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
                spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
-               goto undone;
+               goto out;
        }
 
        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
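
The hole-punch serialization above hinges on a wait queue that lives on the hole
puncher's stack and is only ever published or torn down under inode->i_lock. Stripped
of the shmem details, the handshake reduces to the pattern below (function names made
up; this is a sketch of the pattern, not the exact shmem code):

    /* Puncher: publish an on-stack waitqueue, do the work, then unpublish and
     * wake every waiter before the stack frame disappears. */
    static void puncher_sketch(struct inode *inode)
    {
            DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

            spin_lock(&inode->i_lock);
            inode->i_private = &waitq;
            spin_unlock(&inode->i_lock);

            /* ... the actual hole punching work runs here ... */

            spin_lock(&inode->i_lock);
            inode->i_private = NULL;        /* stop new waiters finding the queue */
            wake_up_all(&waitq);            /* autoremoves every parked waiter   */
            spin_unlock(&inode->i_lock);
    }

    /* Waiter: only look up and park on the queue while i_lock pins it; finish_wait()
     * is safe even after wake_up_all() has run, because it does not touch a queue
     * the entry was already removed from. */
    static void waiter_sketch(struct inode *inode)
    {
            DEFINE_WAIT(wait);
            wait_queue_head_t *waitq;

            spin_lock(&inode->i_lock);
            waitq = inode->i_private;
            if (!waitq) {
                    spin_unlock(&inode->i_lock);
                    return;
            }
            prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
            spin_unlock(&inode->i_lock);

            schedule();

            spin_lock(&inode->i_lock);      /* orders us against wake_up_all() */
            finish_wait(waitq, &wait);
            spin_unlock(&inode->i_lock);
    }
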
index 735e01a0db6f8c3ffa28150e5faedf362dc5b874..d31c4bacc6a203b0bc555bd76c2a97e90e78fa6c 100644 (file)
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
                        continue;
                }
 
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
                if (!strcmp(s->name, name)) {
                        pr_err("%s (%s): Cache name already exists.\n",
                               __func__, name);
index 6a78c814bebfb151b1e490424c731edc172ad430..eda2473071648cc47935dd9e21e9f57fd402a4dd 100644 (file)
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup_entries(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
-                       indices)) {
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+                       /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
+                       /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }
                if (index == start && indices[0] >= end) {
+                       /* All gone out of hole to be punched, we're done */
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
-                       if (index >= end)
+                       if (index >= end) {
+                               /* Restart punch to make sure all gone */
+                               index = start - 1;
                                break;
+                       }
 
                        if (radix_tree_exceptional_entry(page)) {
                                clear_exceptional_entry(mapping, index, page);
index 6f0d9ec3795059fdc5319574b65b24c08aaf2790..a957c8140721def2878b292c2e61e2dd37c36e5c 100644 (file)
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
        bla_dst_own = &bat_priv->bla.claim_dest;
 
-       /* check if it is a claim packet in general */
-       if (memcmp(bla_dst->magic, bla_dst_own->magic,
-                  sizeof(bla_dst->magic)) != 0)
-               return 0;
-
        /* if announcement packet, use the source,
         * otherwise assume it is in the hw_src
         */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct sk_buff *skb)
 {
-       struct batadv_bla_claim_dst *bla_dst;
+       struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
        uint8_t *hw_src, *hw_dst;
-       struct vlan_ethhdr *vhdr;
+       struct vlan_hdr *vhdr, vhdr_buf;
        struct ethhdr *ethhdr;
        struct arphdr *arphdr;
        unsigned short vid;
+       int vlan_depth = 0;
        __be16 proto;
        int headlen;
        int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        proto = ethhdr->h_proto;
        headlen = ETH_HLEN;
        if (vid & BATADV_VLAN_HAS_TAG) {
-               vhdr = vlan_eth_hdr(skb);
-               proto = vhdr->h_vlan_encapsulated_proto;
-               headlen += VLAN_HLEN;
+               /* Traverse the VLAN/Ethertypes.
+                *
+                * At this point it is known that the first protocol is a VLAN
+                * header, so start checking at the encapsulated protocol.
+                *
+                * The depth of the VLAN headers is recorded to drop BLA claim
+                * frames encapsulated into multiple VLAN headers (QinQ).
+                */
+               do {
+                       vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
+                                                 &vhdr_buf);
+                       if (!vhdr)
+                               return 0;
+
+                       proto = vhdr->h_vlan_encapsulated_proto;
+                       headlen += VLAN_HLEN;
+                       vlan_depth++;
+               } while (proto == htons(ETH_P_8021Q));
        }
 
        if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
        hw_dst = hw_src + ETH_ALEN + 4;
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+       bla_dst_own = &bat_priv->bla.claim_dest;
+
+       /* check if it is a claim frame in general */
+       if (memcmp(bla_dst->magic, bla_dst_own->magic,
+                  sizeof(bla_dst->magic)) != 0)
+               return 0;
+
+       /* check if there is a claim frame encapsulated deeper in (QinQ) and
+        * drop that, as this is not supported by BLA but should also not be
+        * sent via the mesh.
+        */
+       if (vlan_depth > 1)
+               return 1;
 
        /* check if it is a claim frame. */
        ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
index e7ee65dc20bf4f25a1a8d0134c66b0bfaef25bd3..cbd677f48c00541fc8ff9aed5b0943d3855b1810 100644 (file)
@@ -448,10 +448,15 @@ out:
  *  possibly free it
  * @softif_vlan: the vlan object to release
  */
-void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
-       if (atomic_dec_and_test(&softif_vlan->refcount))
-               kfree_rcu(softif_vlan, rcu);
+       if (atomic_dec_and_test(&vlan->refcount)) {
+               spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+               hlist_del_rcu(&vlan->list);
+               spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+               kfree_rcu(vlan, rcu);
+       }
 }
 
 /**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
        if (!vlan)
                return -ENOMEM;
 
+       vlan->bat_priv = bat_priv;
        vlan->vid = vid;
        atomic_set(&vlan->refcount, 1);
 
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                return err;
        }
 
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
        /* add a new TT local entry. This one will be marked with the NOPURGE
         * flag
         */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                            bat_priv->soft_iface->dev_addr, vid,
                            BATADV_NULL_IFINDEX, BATADV_NO_MARK);
 
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
        return 0;
 }
 
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
                                       struct batadv_softif_vlan *vlan)
 {
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_del_rcu(&vlan->list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
-       batadv_sysfs_del_vlan(bat_priv, vlan);
-
        /* explicitly remove the associated TT local entry because it is marked
         * with the NOPURGE flag
         */
        batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
                               vlan->vid, "vlan interface destroyed", false);
 
+       batadv_sysfs_del_vlan(bat_priv, vlan);
        batadv_softif_vlan_free_ref(vlan);
 }
 
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
                                    unsigned short vid)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_softif_vlan *vlan;
+       int ret;
 
        /* only 802.1Q vlans are supported.
         * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 
        vid |= BATADV_VLAN_HAS_TAG;
 
-       return batadv_softif_create_vlan(bat_priv, vid);
+       /* if a new vlan is getting created and it already exists, it means that
+        * it was not deleted yet. batadv_softif_vlan_get() increases the
+        * refcount in order to revive the object.
+        *
+        * if it does not exist then create it.
+        */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               return batadv_softif_create_vlan(bat_priv, vid);
+
+       /* recreate the sysfs object if it was already destroyed (and it should
+        * be since we received a kill_vid() for this vlan)
+        */
+       if (!vlan->kobj) {
+               ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+               if (ret) {
+                       batadv_softif_vlan_free_ref(vlan);
+                       return ret;
+               }
+       }
+
+       /* add a new TT local entry. This one will be marked with the NOPURGE
+        * flag. This must be added again, even if the vlan object already
+        * exists, because the entry was deleted by kill_vid()
+        */
+       batadv_tt_local_add(bat_priv->soft_iface,
+                           bat_priv->soft_iface->dev_addr, vid,
+                           BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+       return 0;
 }
 
 /**
index d636bde72c9ace9cfbcead01353c955f17923155..5f59e7f899a0179a544764207468c6b4b336a237 100644 (file)
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global = NULL;
+       struct batadv_softif_vlan *vlan;
        struct net_device *in_dev = NULL;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (!tt_local)
                goto out;
 
+       /* increase the refcounter of the related vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
                   addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (unlikely(hash_added != 0)) {
                /* remove the reference for the hash */
                batadv_tt_local_entry_free_ref(tt_local);
+               batadv_softif_vlan_free_ref(vlan);
                goto out;
        }
 
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+       struct batadv_softif_vlan *vlan;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        hlist_del_rcu(&tt_local_entry->common.hash_entry);
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
+       /* decrease the reference held for this vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       batadv_softif_vlan_free_ref(vlan);
+       batadv_softif_vlan_free_ref(vlan);
+
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv,
+                                                     tt_common_entry->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
index 34891a56773f09ebcccab01fe3191b1a56651aed..8854c05622a9bae2b8f30b0cf91686e8c629a849 100644 (file)
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
+ * @bat_priv: pointer to the mesh object
  * @vid: VLAN identifier
  * @kobj: kobject for sysfs vlan subdirectory
  * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
  * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+       struct batadv_priv *bat_priv;
        unsigned short vid;
        struct kobject *kobj;
        atomic_t ap_isolation;          /* boolean */
index 7990984ca364093f77964d3ac1bbfcbc8f366595..367a586d0c8a851ad0b5ea57fab32dfe894d662c 100644 (file)
@@ -4096,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
+       skb->encapsulation = 0;
+       skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
        napi->skb = skb;
index 9acec61f54334f146d87711f145f1992dbf2c360..dd8696a3dbec9b907cc9fa3744a8bf37e8efca93 100644 (file)
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
                goto put;
 
        memcpy(*_result, upayload->data, len);
-       *_result[len] = '\0';
+       (*_result)[len] = '\0';
 
        if (_expiry)
                *_expiry = rkey->expiry;
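
The one-character fix above is pure C precedence: '*_result[len]' parses as
'*(_result[len])', indexing the pointer-to-pointer itself, while terminating the
returned string needs '(*_result)[len]'. A tiny standalone illustration (buffer and
variable names are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            size_t len = 5;
            char *buf = malloc(len + 1);
            char **_result = &buf;

            if (!buf)
                    return 1;

            memcpy(*_result, "hello!", len);        /* copy payload without a terminator */

            /* *_result[len] would mean *(_result[len]): it reads _result[5], far past
             * the single pointer we have - undefined behaviour, as in the bug. */
            (*_result)[len] = '\0';                 /* terminate the buffer buf points to */

            puts(buf);                              /* prints "hello" */
            free(buf);
            return 0;
    }
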
index d5e6836cf772d677271c46681ffa442baa9d0c99..d156b3c5f3631f169f179bfdd534b69ce5070b82 100644 (file)
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
        int proto = iph->protocol;
        int err = -ENOSYS;
 
+       if (skb->encapsulation)
+               skb_set_inner_network_header(skb, nhoff);
+
        csum_replace2(&iph->check, iph->tot_len, newlen);
        iph->tot_len = newlen;
 
index eb92deb12666fb56b6a289c55b8205fc27117933..f0bdd47bbbcb5f420e9c3e406ca9dd1e83d0e554 100644 (file)
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
        int err = -ENOENT;
        __be16 type;
 
+       skb->encapsulation = 1;
+       skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
+
        type = greh->protocol;
        if (greh->flags & GRE_KEY)
                grehlen += GRE_HEADER_SECTION;
index 5e7aecea05cd2afbd3e3e13f417e26687517b468..ad382499bace4ce3852c2953bb262f2796d9a416 100644 (file)
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
                        optptr++;
                        continue;
                }
+               if (unlikely(l < 2)) {
+                       pp_ptr = optptr;
+                       goto error;
+               }
                optlen = optptr[1];
                if (optlen < 2 || optlen > l) {
                        pp_ptr = optptr;
index 4e86c59ec7f7f07fe06c6db20d17851da6f1563f..55046ecd083ea8afb964205eb8ea38e2bbe91708 100644 (file)
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
        return tcp_gro_complete(skb);
 }
index 8517d3cd1aed460bbfb1bfb0f515924f008b790d..01b0ff9a0c2c00d6734537254e2150edcbcb64d7 100644 (file)
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
                                  &iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
 
        return tcp_gro_complete(skb);
 }
index ab4566cfcbe497beea641dff934e24dc3341096b..8746ff9a83571d97b3991dced7ad3f968c15bdfa 100644 (file)
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
 {
        INIT_LIST_HEAD(&afi->tables);
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&afi->list, &net->nft.af_info);
+       list_add_tail_rcu(&afi->list, &net->nft.af_info);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
 void nft_unregister_afinfo(struct nft_af_info *afi)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&afi->list);
+       list_del_rcu(&afi->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
                        if (idx < s_idx)
                                goto cont;
                        if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
                                                      NLM_F_MULTI,
                                                      afi->family, table) < 0)
                                goto done;
+
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
        if (flags & ~NFT_TABLE_F_DORMANT)
                return -EINVAL;
 
+       if (flags == ctx->table->flags)
+               return 0;
+
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
                                sizeof(struct nft_trans_table));
        if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                module_put(afi->owner);
                return err;
        }
-       list_add_tail(&table->list, &afi->tables);
+       list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
 }
 
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&table->list);
+       list_del_rcu(&table->list);
        return 0;
 }
 
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 {
        struct nft_stats *cpu_stats, total;
        struct nlattr *nest;
+       unsigned int seq;
+       u64 pkts, bytes;
        int cpu;
 
        memset(&total, 0, sizeof(total));
        for_each_possible_cpu(cpu) {
                cpu_stats = per_cpu_ptr(stats, cpu);
-               total.pkts += cpu_stats->pkts;
-               total.bytes += cpu_stats->bytes;
+               do {
+                       seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       pkts = cpu_stats->pkts;
+                       bytes = cpu_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+               total.pkts += pkts;
+               total.bytes += bytes;
        }
        nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
        if (nest == NULL)
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
                                if (idx < s_idx)
                                        goto cont;
                                if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
                                                              NLM_F_MULTI,
                                                              afi->family, table, chain) < 0)
                                        goto done;
+
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
 
-
 static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
                return ERR_PTR(-EINVAL);
 
-       newstats = alloc_percpu(struct nft_stats);
+       newstats = netdev_alloc_pcpu_stats(struct nft_stats);
        if (newstats == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        }
                        basechain->stats = stats;
                } else {
-                       stats = alloc_percpu(struct nft_stats);
+                       stats = netdev_alloc_pcpu_stats(struct nft_stats);
                        if (IS_ERR(stats)) {
                                module_put(type->owner);
                                kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                goto err2;
 
        table->use++;
-       list_add_tail(&chain->list, &table->chains);
+       list_add_tail_rcu(&chain->list, &table->chains);
        return 0;
 err2:
        if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
                return err;
 
        table->use--;
-       list_del(&chain->list);
+       list_del_rcu(&chain->list);
        return 0;
 }
 
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (type->family == NFPROTO_UNSPEC)
-               list_add_tail(&type->list, &nf_tables_expressions);
+               list_add_tail_rcu(&type->list, &nf_tables_expressions);
        else
-               list_add(&type->list, &nf_tables_expressions);
+               list_add_rcu(&type->list, &nf_tables_expressions);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
 void nft_unregister_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&type->list);
+       list_del_rcu(&type->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
        unsigned int idx = 0, s_idx = cb->args[0];
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
-       u8 genctr = ACCESS_ONCE(net->nft.genctr);
-       u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
-                               list_for_each_entry(rule, &chain->rules, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
+                               list_for_each_entry_rcu(rule, &chain->rules, list) {
                                        if (!nft_rule_is_active(net, rule))
                                                goto cont;
                                        if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
                                                                      NLM_F_MULTI | NLM_F_APPEND,
                                                                      afi->family, table, chain, rule) < 0)
                                                goto done;
+
+                                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                        idx++;
                                }
@@ -1579,9 +1603,7 @@ cont:
                }
        }
 done:
-       /* Invalidate this dump, a transition to the new generation happened */
-       if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
-               return -EBUSY;
+       rcu_read_unlock();
 
        cb->args[0] = idx;
        return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
 int nft_register_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&ops->list, &nf_tables_set_ops);
+       list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
 void nft_unregister_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&ops->list);
+       list_del_rcu(&ops->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(set, &ctx->table->sets, list) {
+       rcu_read_lock();
+       cb->seq = ctx->net->nft.base_seq;
+
+       list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                if (idx < s_idx)
                        goto cont;
                if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
                        cb->args[0] = idx;
                        goto done;
                }
+               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                idx++;
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(table, &ctx->afi->tables, list) {
+       rcu_read_lock();
+       cb->seq = ctx->net->nft.base_seq;
+
+       list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
                if (cur_table) {
                        if (cur_table != table)
                                continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
                }
                ctx->table = table;
                idx = 0;
-               list_for_each_entry(set, &ctx->table->sets, list) {
+               list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                        if (idx < s_idx)
                                goto cont;
                        if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
                                cb->args[2] = (unsigned long) table;
                                goto done;
                        }
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (cur_family) {
                        if (afi->family != cur_family)
                                continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                        cur_family = 0;
                }
 
-               list_for_each_entry(table, &afi->tables, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
                        if (cur_table) {
                                if (cur_table != table)
                                        continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                        ctx->table = table;
                        ctx->afi = afi;
                        idx = 0;
-                       list_for_each_entry(set, &ctx->table->sets, list) {
+                       list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                                if (idx < s_idx)
                                        goto cont;
                                if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                                        cb->args[3] = afi->family;
                                        goto done;
                                }
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
@@ -2339,6 +2375,7 @@ cont:
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                goto err2;
 
-       list_add_tail(&set->list, &table->sets);
+       list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
 
 static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
        nft_set_destroy(set);
 }
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        ctx.table->use--;
        return 0;
 }
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        }
 bind:
        binding->chain = ctx->chain;
-       list_add_tail(&binding->list, &set->bindings);
+       list_add_tail_rcu(&binding->list, &set->bindings);
        return 0;
 }
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_binding *binding)
 {
-       list_del(&binding->list);
+       list_del_rcu(&binding->list);
 
        if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
            !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
        struct nft_set *set;
 
        /* Bump generation counter, invalidate any dump in progress */
-       net->nft.genctr++;
+       while (++net->nft.base_seq == 0);
 
        /* A new generation has just started */
        net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                                }
                                nft_trans_destroy(trans);
                        } else {
-                               list_del(&trans->ctx.table->list);
+                               list_del_rcu(&trans->ctx.table->list);
                        }
                        break;
                case NFT_MSG_DELTABLE:
-                       list_add_tail(&trans->ctx.table->list,
-                                     &trans->ctx.afi->tables);
+                       list_add_tail_rcu(&trans->ctx.table->list,
+                                         &trans->ctx.afi->tables);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
-                               list_del(&trans->ctx.chain->list);
+                               list_del_rcu(&trans->ctx.chain->list);
                                if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
                                    trans->ctx.chain->flags & NFT_BASE_CHAIN) {
                                        nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_DELCHAIN:
                        trans->ctx.table->use++;
-                       list_add_tail(&trans->ctx.chain->list,
-                                     &trans->ctx.table->chains);
+                       list_add_tail_rcu(&trans->ctx.chain->list,
+                                         &trans->ctx.table->chains);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_NEWSET:
                        trans->ctx.table->use--;
-                       list_del(&nft_trans_set(trans)->list);
+                       list_del_rcu(&nft_trans_set(trans)->list);
                        break;
                case NFT_MSG_DELSET:
                        trans->ctx.table->use++;
-                       list_add_tail(&nft_trans_set(trans)->list,
-                                     &trans->ctx.table->sets);
+                       list_add_tail_rcu(&nft_trans_set(trans)->list,
+                                         &trans->ctx.table->sets);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
 {
        INIT_LIST_HEAD(&net->nft.af_info);
        INIT_LIST_HEAD(&net->nft.commit_list);
+       net->nft.base_seq = 1;
        return 0;
 }
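
The nf_tables_api.c hunks above convert the afinfo, table, chain, set and expression type lists to RCU: writers still serialize on the nfnetlink mutex but now use the _rcu list primitives, while netlink dumps walk the lists under rcu_read_lock() and use nl_dump_check_consistent() together with the new net->nft.base_seq generation counter to mark dumps that raced with a commit (NLM_F_DUMP_INTR) rather than aborting them with -EBUSY as the rule dump used to. A schematic kernel-style sketch of the same writer/reader split; the demo_* names are invented for illustration and this is not code from the patch:

  #include <linux/errno.h>
  #include <linux/list.h>
  #include <linux/mutex.h>
  #include <linux/rcupdate.h>
  #include <linux/rculist.h>
  #include <linux/slab.h>

  struct demo_entry {
          struct list_head list;
          struct rcu_head rcu;
          int id;
  };

  static LIST_HEAD(demo_list);
  static DEFINE_MUTEX(demo_lock);         /* serializes writers only */

  /* Writers: modify the list under the mutex, using the RCU list helpers
   * so that lockless readers never observe a half-updated list. */
  static int demo_add(int id)
  {
          struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

          if (!e)
                  return -ENOMEM;
          e->id = id;
          mutex_lock(&demo_lock);
          list_add_tail_rcu(&e->list, &demo_list);
          mutex_unlock(&demo_lock);
          return 0;
  }

  static void demo_del(struct demo_entry *e)
  {
          mutex_lock(&demo_lock);
          list_del_rcu(&e->list);
          mutex_unlock(&demo_lock);
          kfree_rcu(e, rcu);              /* free after a grace period */
  }

  /* Reader: no mutex, only an RCU read-side critical section. */
  static int demo_find(int id)
  {
          struct demo_entry *e;
          int found = 0;

          rcu_read_lock();
          list_for_each_entry_rcu(e, &demo_list, list) {
                  if (e->id == id) {
                          found = 1;
                          break;
                  }
          }
          rcu_read_unlock();
          return found;
  }
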
 
index 345acfb1720b14f00aae0e5937ab07bfb90e9482..3b90eb2b2c55453e989c891a3f815be6e1da22d1 100644 (file)
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       struct nft_stats __percpu *stats;
+       struct nft_stats *stats;
        int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
                nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
        rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(basechain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
+       stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+       u64_stats_update_begin(&stats->syncp);
+       stats->pkts++;
+       stats->bytes += pkt->skb->len;
+       u64_stats_update_end(&stats->syncp);
        rcu_read_unlock_bh();
 
        return nft_base_chain(basechain)->policy;
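
The chain counters become proper per-CPU 64-bit stats: netdev_alloc_pcpu_stats() (used in the nf_tables_api.c hunks above) allocates them with each CPU's u64_stats_sync seqcount initialized, the packet path bumps them inside u64_stats_update_begin()/u64_stats_update_end(), and nft_dump_stats() sums them with the fetch/retry loop so the 64-bit values read back consistently even on 32-bit machines. A compact kernel-style sketch of both halves; the demo_* names are invented:

  #include <linux/netdevice.h>            /* netdev_alloc_pcpu_stats() */
  #include <linux/percpu.h>
  #include <linux/u64_stats_sync.h>

  struct demo_stats {
          u64 pkts;
          u64 bytes;
          struct u64_stats_sync syncp;    /* field name required by the alloc macro */
  };

  static struct demo_stats __percpu *demo_stats;

  static int demo_stats_init(void)
  {
          demo_stats = netdev_alloc_pcpu_stats(struct demo_stats);
          return demo_stats ? 0 : -ENOMEM;
  }

  /* Writer, fast path: called with softirqs/preemption disabled (the hook
   * above runs under rcu_read_lock_bh()), so this CPU's slot is stable. */
  static void demo_count(unsigned int len)
  {
          struct demo_stats *s = this_cpu_ptr(demo_stats);

          u64_stats_update_begin(&s->syncp);
          s->pkts++;
          s->bytes += len;
          u64_stats_update_end(&s->syncp);
  }

  /* Reader, slow path: sum every CPU, retrying any CPU whose counters
   * were mid-update while we read them. */
  static void demo_sum(u64 *pkts, u64 *bytes)
  {
          unsigned int seq;
          u64 p, b;
          int cpu;

          *pkts = 0;
          *bytes = 0;
          for_each_possible_cpu(cpu) {
                  struct demo_stats *s = per_cpu_ptr(demo_stats, cpu);

                  do {
                          seq = u64_stats_fetch_begin_irq(&s->syncp);
                          p = s->pkts;
                          b = s->bytes;
                  } while (u64_stats_fetch_retry_irq(&s->syncp, seq));
                  *pkts += p;
                  *bytes += b;
          }
  }
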
index c39b583ace3229d4bae6a7b3774593e5eebd7141..70c0be8d0121db461c1e21793a1b68d844f37e8b 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/errno.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
+#include <linux/bitmap.h>
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
        return 0;
 }
 
+#define NR_U32_NODE (1<<12)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
        struct tc_u_knode *n;
-       unsigned int i = 0x7FF;
+       unsigned long i;
+       unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+                                       GFP_KERNEL);
+       if (!bitmap)
+               return handle | 0xFFF;
 
        for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
-               if (i < TC_U32_NODE(n->handle))
-                       i = TC_U32_NODE(n->handle);
-       i++;
+               set_bit(TC_U32_NODE(n->handle), bitmap);
 
-       return handle | (i > 0xFFF ? 0xFFF : i);
+       i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+       if (i >= NR_U32_NODE)
+               i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+       kfree(bitmap);
+       return handle | (i >= NR_U32_NODE ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
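
The cls_u32 change replaces "highest node id in the bucket plus one", which never reuses freed ids and, once it saturates at 0xFFF, can hand back an id that is already taken, with a bitmap of all 4096 possible node ids searched first from 0x800 and then, if that range is full, from 1. A small stand-alone C sketch of the same idea, using a plain byte array where the kernel code uses set_bit()/find_next_zero_bit():

  #include <stdio.h>

  #define NR_NODE (1 << 12)               /* node ids 0x000 .. 0xFFF */

  static unsigned char used[NR_NODE];     /* byte-per-id stand-in for the bitmap */

  /* Return the first free id in [start, NR_NODE), or NR_NODE if none. */
  static unsigned int find_free(unsigned int start)
  {
          unsigned int i;

          for (i = start; i < NR_NODE; i++)
                  if (!used[i])
                          return i;
          return NR_NODE;
  }

  int main(void)
  {
          unsigned int i, id;

          /* Fill the whole upper half, as a long-running setup might. */
          for (i = 0x800; i < NR_NODE; i++)
                  used[i] = 1;

          id = find_free(0x800);          /* preferred range first ...    */
          if (id >= NR_NODE)
                  id = find_free(1);      /* ... then wrap to the low ids */

          /* Prints "allocated id 0x1": a free id is still found, where a
           * "max + 1, clamped to 0xFFF" scheme would return the already
           * used 0xFFF. */
          printf("allocated id 0x%x\n", id);
          return 0;
  }
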
index 6af50eb80ea7517680dbb7b18981eb9906fde5ae..70faa3a325264a68511a2edca0efa225ebe40942 100644 (file)
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
        struct special_params *params = bebob->maudio_special_quirk;
        int err, id;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
        if (id >= ARRAY_SIZE(special_clk_labels))
-               return 0;
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob, id,
                                         params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
                                         params->clk_lock);
        mutex_unlock(&bebob->mutex);
 
-       return err >= 0;
+       if (err >= 0)
+               err = 1;
+
+       return err;
 }
 static struct snd_kcontrol_new special_clk_ctl = {
        .name   = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
        .get    = special_sync_ctl_get,
 };
 
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
        "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item]);
+              special_dig_in_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id, dig_in_fmt, dig_in_iface;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+               return -EINVAL;
 
        /* decode user value */
        dig_in_fmt = (id >> 1) & 0x01;
        dig_in_iface = id & 0x01;
 
+       mutex_lock(&bebob->mutex);
+
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         dig_in_fmt,
                                         params->dig_out_fmt,
                                         params->clk_lock);
-       if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+       if (err < 0)
+               goto end;
+
+       /* For ADAT, optical interface is only available. */
+       if (params->dig_in_fmt > 0) {
+               err = 1;
                goto end;
+       }
 
+       /* For S/PDIF, optical/coaxial interfaces are selectable. */
        err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
        if (err < 0)
                dev_err(&bebob->unit->device,
                        "fail to set digital input interface: %d\n", err);
+       err = 1;
 end:
        special_stream_formation_set(bebob);
        mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
        .put    = special_dig_in_iface_ctl_set
 };
 
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+       "S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
                                          struct snd_ctl_elem_info *einf)
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item + 1]);
+              special_dig_out_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         params->dig_in_fmt,
                                         id, params->clk_lock);
-       if (err >= 0)
+       if (err >= 0) {
                special_stream_formation_set(bebob);
+               err = 1;
+       }
 
        mutex_unlock(&bebob->mutex);
        return err;
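
These bebob_maudio.c hunks bring the .put handlers in line with the ALSA control convention: validate the user-supplied enum item and bail out with -EINVAL before taking the device mutex, propagate real error codes instead of collapsing them to 0/1, and return 1 once the value has actually been written so the core sends a change notification (0 means "unchanged", negative means error). A schematic handler following that convention; the demo_* names and the register-write helper are invented stand-ins for the driver-specific parts:

  #include <linux/errno.h>
  #include <linux/kernel.h>
  #include <linux/mutex.h>
  #include <sound/control.h>
  #include <sound/core.h>

  static const char *const demo_labels[] = { "Internal", "S/PDIF", "ADAT" };

  struct demo_device {
          struct mutex mutex;
          unsigned int cur;
  };

  /* Hypothetical helper; a real driver would program the hardware here. */
  static int demo_write_source(struct demo_device *dev, unsigned int id)
  {
          dev->cur = id;
          return 0;
  }

  static int demo_enum_ctl_put(struct snd_kcontrol *kctl,
                               struct snd_ctl_elem_value *uval)
  {
          struct demo_device *dev = snd_kcontrol_chip(kctl);
          unsigned int id = uval->value.enumerated.item[0];
          int err;

          if (id >= ARRAY_SIZE(demo_labels))
                  return -EINVAL;               /* validate before locking */

          mutex_lock(&dev->mutex);
          if (id == dev->cur) {
                  mutex_unlock(&dev->mutex);
                  return 0;                     /* nothing changed */
          }
          err = demo_write_source(dev, id);
          mutex_unlock(&dev->mutex);

          if (err < 0)
                  return err;                   /* propagate the real error */
          return 1;                             /* changed: notify userspace */
  }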