Merge tag 'mlx5-updates-2019-02-19' of git://git.kernel.org/pub/scm/linux/kernel...
author David S. Miller <davem@davemloft.net>
Thu, 21 Feb 2019 04:13:58 +0000 (20:13 -0800)
committer David S. Miller <davem@davemloft.net>
Thu, 21 Feb 2019 04:13:58 +0000 (20:13 -0800)
Saeed Mahameed says:

====================
mlx5-updates-2019-02-19

This series includes misc updates to mlx5 drivers and one ethtool update.

1) From Aya Levin:
   - ethtool: Define 50Gbps per lane link modes
   - Add support for 50Gbps per lane link modes in the mlx5 driver
     (a hedged sketch of advertising such modes follows this message)

2) From Tariq Toukan:
   - Add a helper function to unify mlx5 resource reloading
     (a hypothetical sketch of the pattern appears after the sign-off)

3) From Vlad Buslov:
   - Remove wrong and superfluous tc pedit header type check

4) From Tonghao Zhang:
   - Some refactoring in en_tc.c to simplify mlx5e_tc_add_fdb_flow()

5) From Leon Romanovsky and Saeed:
   - Compilation warning fixes

6) From Bodong Wang:
   - E-Switch fixes related to the SmartNIC series
====================
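
As a quick illustration of item 1, here is a minimal, hedged sketch of how a
driver's .get_link_ksettings callback could advertise 50Gbps-per-lane modes
through the generic ethtool_link_ksettings helpers. This is not the mlx5
implementation (mlx5 translates its PTYS protocol bitmasks through driver
tables); the callback and device are placeholders, while the link-mode names
and helper macros are the generic kernel ones.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Sketch only: advertise the single-lane 50G modes on a made-up device */
static int sketch_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *ks)
{
	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);

	/* 50Gbps over one lane: backplane, copper and short-reach optics */
	ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseKR_Full);
	ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseCR_Full);
	ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseSR_Full);

	/* Advertise everything we support in this toy example */
	bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);

	ks->base.speed = SPEED_50000;
	ks->base.duplex = DUPLEX_FULL;
	return 0;
}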

Signed-off-by: David S. Miller <davem@davemloft.net>
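
For item 2, resource reloading in mlx5e generally amounts to closing and
reopening the channels under priv->state_lock. The sketch below is
hypothetical: the helper name is invented here, not taken from the series;
only mlx5e_open_locked()/mlx5e_close_locked(), MLX5E_STATE_OPENED and the
state lock are existing mlx5e constructs (see
drivers/net/ethernet/mellanox/mlx5/core/en.h). Consult the series itself for
the real helper.

/* Hypothetical sketch, not the series code: reload by close + reopen */
static int sketch_mlx5e_reload_resources(struct mlx5e_priv *priv)
{
	int err = 0;

	mutex_lock(&priv->state_lock);

	/* Nothing to rebuild if the netdev was never brought up */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
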
154 files changed:
Documentation/networking/msg_zerocopy.rst
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/boot/dts/rk3188.dtsi
arch/arm/include/asm/irq.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/stage2_pgtable.h
arch/arm/kernel/irq.c
arch/arm/kernel/smp.c
arch/arm/kvm/coproc.c
arch/arm/kvm/reset.c
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mm/dma-mapping.c
arch/arm/probes/kprobes/opt-arm.c
arch/arm64/boot/dts/freescale/imx8mq-evk.dts
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/memory.h
arch/arm64/kernel/setup.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/csky/include/asm/pgtable.h
arch/csky/include/asm/processor.h
arch/csky/kernel/dumpstack.c
arch/csky/kernel/ptrace.c
arch/csky/kernel/smp.c
arch/csky/mm/ioremap.c
arch/mips/net/ebpf_jit.c
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/perf_event.h
arch/x86/ia32/ia32_aout.c
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/uv/bios.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/platform/uv/bios_uv.c
crypto/af_alg.c
drivers/auxdisplay/ht16k33.c
drivers/bus/ti-sysc.c
drivers/clocksource/timer-ti-dm.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/runtime-wrappers.c
drivers/i2c/busses/i2c-bcm2835.c
drivers/i2c/busses/i2c-cadence.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/cap11xx.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/keyboard/qt2160.c
drivers/input/keyboard/st-keyscan.c
drivers/input/misc/apanel.c
drivers/input/misc/bma150.c
drivers/input/misc/pwm-vibra.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elantech.c
drivers/input/serio/ps2-gpio.c
drivers/mailbox/bcm-flexrm-mailbox.c
drivers/mailbox/mailbox.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/qca8k.h
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/freescale/enetc/enetc_ptp.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/phy/xilinx_gmii2rgmii.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/ptp/ptp_qoriq.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/sd.c
drivers/vhost/vhost.c
fs/binfmt_script.c
fs/nfs/write.c
fs/nfsd/nfsctl.c
include/kvm/arm_vgic.h
include/linux/compiler_attributes.h
include/linux/efi.h
include/linux/fsl/ptp_qoriq.h
include/linux/memblock.h
include/linux/module.h
include/linux/netdev_features.h
include/linux/perf_event.h
include/linux/skbuff.h
include/linux/virtio_net.h
kernel/bpf/stackmap.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_kprobe.c
lib/crc32.c
mm/memblock.c
mm/page_alloc.c
net/core/dev.c
net/core/filter.c
net/core/skbuff.c
net/ieee802154/6lowpan/reassembly.c
net/ipv4/af_inet.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/mac80211/cfg.c
net/mac80211/mesh.h
net/mac80211/mesh_pathtbl.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_tables_api.c
net/rose/af_rose.c
net/sched/cls_tcindex.c
net/sunrpc/auth_gss/gss_krb5_seqnum.c
net/sunrpc/debugfs.c
net/sunrpc/xprtrdma/verbs.c
net/xdp/xdp_umem.c
net/xdp/xsk.c
tools/testing/selftests/net/tls.c
virt/kvm/arm/arm.c
virt/kvm/arm/mmu.c
virt/kvm/arm/psci.c
virt/kvm/arm/vgic/vgic-debug.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v2.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c

index fe46d4867e2dbfa4cde05cc0b900c157d99b67d9..18c1415e7bfad8f6e6e9b03febaf47f83a0f9915 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -7,7 +7,7 @@ Intro
 =====
 
 The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
-The feature is currently implemented for TCP sockets.
+The feature is currently implemented for TCP and UDP sockets.
 
 
 Opportunity and Caveats
index 86cf35d1d79d7fdcc33fbf503eca37b0e5ad57c9..96c5335e7ee4c06512c0d295facc12377bedd16a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
index 664e918e26249a6dd0a43fa1c80eaf187a040821..26524b75970a3ef52bf7609fefa3626f07afcb2a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1400,6 +1400,7 @@ config NR_CPUS
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
        depends on SMP
+       select GENERIC_IRQ_MIGRATION
        help
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
index 04758a2a87f031eb42df555cc85a85bd0bf63816..67d77eee9433c655e0bd8f0c1dbf7c25aba3ecce 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
        };
 };
 
+/* Configure pwm clock source for timers 8 & 9 */
+&timer8 {
+       assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
+       assigned-clock-parents = <&sys_clkin_ck>;
+};
+
+&timer9 {
+       assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
+       assigned-clock-parents = <&sys_clkin_ck>;
+};
+
 /*
  * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
  * uart1 wakeirq.
index bc853ebeda221f43dfeeb78a64c60f10578eb3e6..61a06f6add3ca52a9d7c4851080c53d0a5d18ac9 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
 
        palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
                pinctrl-single,pins = <
-                       OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+                       /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+                       OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
                >;
        };
 
 
        palmas: palmas@48 {
                compatible = "ti,palmas";
-               interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
+               /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
                reg = <0x48>;
                interrupt-controller;
                #interrupt-cells = <2>;
                pinctrl-names = "default";
                pinctrl-0 = <&twl6040_pins>;
 
-               interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
+               /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+               interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
 
                /* audpwron gpio defined in the board specific dts */
 
index 5e21fb430a65daa8e29a1ca90a404389d9dc99a9..e78d3718f145d544dee0625ada9c3bfe7ca9327b 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
                        OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6)  /* llib_wakereqin.gpio1_wk15 */
                >;
        };
+
+       palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+               pinctrl-single,pins = <
+                       /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+                       OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
+               >;
+       };
 };
 
 &omap5_pmx_core {
 
        palmas: palmas@48 {
                compatible = "ti,palmas";
-               interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
                reg = <0x48>;
+               pinctrl-0 = <&palmas_sys_nirq_pins>;
+               pinctrl-names = "default";
+               /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
                #interrupt-cells = <2>;
                ti,system-power-controller;
index 4acb501dd3f8ef31355f2ae2adb9d6a60a386d40..3ed49898f4b2edab7e6a5efbf68ea081cfa64734 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
                        pm_qos = <&qos_lcdc0>,
                                 <&qos_lcdc1>,
                                 <&qos_cif0>,
-                                <&qos_cif1>,
                                 <&qos_ipp>,
                                 <&qos_rga>;
                };
index c883fcbe93b67ef68bfc18a6e48d4ec53c37cdd0..46d41140df27dd9c4f15c713189db2199c1352a2 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -25,7 +25,6 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 struct pt_regs;
-extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
index ca56537b61bc87473bf9df15d5169cca22c4390f..50e89869178a9725f0bb6c8bb2082bc186fcbab7 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING    KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET     KVM_ARCH_REQ(2)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+struct vcpu_reset_state {
+       unsigned long   pc;
+       unsigned long   r0;
+       bool            be;
+       bool            reset;
+};
+
 struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;
 
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
 
+       struct vcpu_reset_state reset_state;
+
        /* Detect first run of a vcpu */
        bool has_run_once;
 };
index c4b1d4fb17972ed3d638e187663607fe42bc9f0e..de2089501b8b5705a29bcb80b7007d630cfabc60 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
 #define S2_PMD_MASK                            PMD_MASK
 #define S2_PMD_SIZE                            PMD_SIZE
 
+static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
+{
+       return true;
+}
+
 #endif /* __ARM_S2_PGTABLE_H_ */
index 9908dacf9229fbfa694ceebdfb2ed1b534c3f522..844861368cd5c236a113adaeab8a26d43d8ac419 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
        return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-       struct irq_data *d = irq_desc_get_irq_data(desc);
-       const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-       struct irq_chip *c;
-       bool ret = false;
-
-       /*
-        * If this is a per-CPU interrupt, or the affinity does not
-        * include this CPU, then we have nothing to do.
-        */
-       if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-               return false;
-
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-               affinity = cpu_online_mask;
-               ret = true;
-       }
-
-       c = irq_data_get_irq_chip(d);
-       if (!c->irq_set_affinity)
-               pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-               cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-       return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-       unsigned int i;
-       struct irq_desc *desc;
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       for_each_irq_desc(i, desc) {
-               bool affinity_broken;
-
-               raw_spin_lock(&desc->lock);
-               affinity_broken = migrate_one_irq(desc);
-               raw_spin_unlock(&desc->lock);
-
-               if (affinity_broken)
-                       pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-                               i, smp_processor_id());
-       }
-
-       local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
index 3bf82232b1bed4bce829749ce6af885bbc43c191..1d6f5ea522f49184c53a7d996769104107b4de8e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@ int __cpu_disable(void)
        /*
         * OK - migrate IRQs away from this CPU
         */
-       migrate_irqs();
+       irq_migrate_all_off_this_cpu();
 
        /*
         * Flush user cache and TLB mappings, and then remove this CPU
index 222c1635bc7a1337c80e3af115df5f0696f52a7d..e8bd288fd5be909dad8ec74561330fea3a972ff7 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
        reset_coproc_regs(vcpu, table, num);
 
        for (num = 1; num < NR_CP15_REGS; num++)
-               if (vcpu_cp15(vcpu, num) == 0x42424242)
-                       panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
+               WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+                    "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
index 5ed0c3ee33d66b3c8263d28d28da756693b4c935..e53327912adc67e80a93f6f4130df5b81313f902 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 
 #include <kvm/arm_arch_timer.h>
 
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        /* Reset CP15 registers */
        kvm_reset_coprocs(vcpu);
 
+       /*
+        * Additional reset state handling that PSCI may have imposed on us.
+        * Must be done after all the sys_reg reset.
+        */
+       if (READ_ONCE(vcpu->arch.reset_state.reset)) {
+               unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+               /* Gracefully handle Thumb2 entry point */
+               if (target_pc & 1) {
+                       target_pc &= ~1UL;
+                       vcpu_set_thumb(vcpu);
+               }
+
+               /* Propagate caller endianness */
+               if (vcpu->arch.reset_state.be)
+                       kvm_vcpu_set_be(vcpu);
+
+               *vcpu_pc(vcpu) = target_pc;
+               vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+               vcpu->arch.reset_state.reset = false;
+       }
+
        /* Reset arch_timer context */
        return kvm_timer_vcpu_reset(vcpu);
 }
index a8b291f00109c653c05d47ab49abb7c9c306b238..dae514c8276aac6218fd4a7f5df711769c048769 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
                                 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+       /* Enter broadcast mode for periodic timers */
+       tick_broadcast_enable();
+
+       /* Enter broadcast mode for one-shot timers */
        tick_broadcast_enter();
 
        /*
@@ -218,15 +222,6 @@ fail:
        return index;
 }
 
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
-       tick_broadcast_enable();
-}
-
 static struct cpuidle_driver omap4_idle_driver = {
        .name                           = "omap4_idle",
        .owner                          = THIS_MODULE,
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
        if (!cpu_clkdm[0] || !cpu_clkdm[1])
                return -ENODEV;
 
-       /* Configure the broadcast timer on each cpu */
-       on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
-
        return cpuidle_register(idle_driver, cpu_online_mask);
 }
index f86b72d1d59e51f4af15319df87ee61141b4fd02..1444b4b4bd9f85e54368c0e18ac31f3f2fc033eb 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
        u32 enable_mask, enable_shift;
        u32 pipd_mask, pipd_shift;
        u32 reg;
+       int ret;
 
        if (dsi_id == 0) {
                enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
                return -ENODEV;
        }
 
-       regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
+       ret = regmap_read(omap4_dsi_mux_syscon,
+                                         OMAP4_DSIPHY_SYSCON_OFFSET,
+                                         &reg);
+       if (ret)
+               return ret;
 
        reg &= ~enable_mask;
        reg &= ~pipd_mask;
index fc5fb776a7101234bd64da673815d10a0b75f0f2..17558be4bf0a528700939684e4f39c002e0215c7 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
 #define OMAP4_NR_BANKS         4
 #define OMAP4_NR_IRQS          128
 
+#define SYS_NIRQ1_EXT_SYS_IRQ_1        7
+#define SYS_NIRQ2_EXT_SYS_IRQ_2        119
+
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
        irq_chip_unmask_parent(d);
 }
 
+/*
+ * The sys_nirq pins bypass peripheral modules and are wired directly
+ * to MPUSS wakeupgen. They get automatically inverted for GIC.
+ */
+static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
+{
+       bool inverted = false;
+
+       switch (type) {
+       case IRQ_TYPE_LEVEL_LOW:
+               type &= ~IRQ_TYPE_LEVEL_MASK;
+               type |= IRQ_TYPE_LEVEL_HIGH;
+               inverted = true;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               type &= ~IRQ_TYPE_EDGE_BOTH;
+               type |= IRQ_TYPE_EDGE_RISING;
+               inverted = true;
+               break;
+       default:
+               break;
+       }
+
+       if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
+           d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
+               pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
+                       d->hwirq);
+
+       return irq_chip_set_type_parent(d, type);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
 
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
        .irq_mask               = wakeupgen_mask,
        .irq_unmask             = wakeupgen_unmask,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .irq_set_type           = irq_chip_set_type_parent,
+       .irq_set_type           = wakeupgen_irq_set_type,
        .flags                  = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
index f1e2922e447cd07a6acca27123d3254f4fd10ba8..1e3e08a1c45677e66017cb6479049658db90dbde 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
                return;
 
        arm_teardown_iommu_dma_ops(dev);
+       /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+       set_dma_ops(dev, NULL);
 }
index 2c118a6ab358736e8227214b081fce343b48b29f..0dc23fc227ed2745215eeda46965dcac1524281b 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
        }
 
        /* Copy arch-dep-instance from template. */
-       memcpy(code, (unsigned char *)optprobe_template_entry,
+       memcpy(code, (unsigned long *)&optprobe_template_entry,
                        TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
        /* Adjust buffer according to instruction. */
index 64acccc4bfcb4d1c0cf834e42f031b99a4d8316b..f74b13aa5aa59ac962de33147ac5730d7df4a2cf 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
 
        pinctrl_usdhc1_100mhz: usdhc1-100grp {
                fsl,pins = <
-                       MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK                 0x85
-                       MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                 0xc5
-                       MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0             0xc5
-                       MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1             0xc5
-                       MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2             0xc5
-                       MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3             0xc5
-                       MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4             0xc5
-                       MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5             0xc5
-                       MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6             0xc5
-                       MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7             0xc5
-                       MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE           0x85
+                       MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK                 0x8d
+                       MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                 0xcd
+                       MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0             0xcd
+                       MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1             0xcd
+                       MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2             0xcd
+                       MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3             0xcd
+                       MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4             0xcd
+                       MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5             0xcd
+                       MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6             0xcd
+                       MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7             0xcd
+                       MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE           0x8d
                        MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B         0xc1
                >;
        };
 
        pinctrl_usdhc1_200mhz: usdhc1-200grp {
                fsl,pins = <
-                       MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK                 0x87
-                       MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                 0xc7
-                       MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0             0xc7
-                       MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1             0xc7
-                       MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2             0xc7
-                       MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3             0xc7
-                       MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4             0xc7
-                       MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5             0xc7
-                       MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6             0xc7
-                       MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7             0xc7
-                       MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE           0x87
+                       MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK                 0x9f
+                       MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                 0xdf
+                       MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0             0xdf
+                       MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1             0xdf
+                       MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2             0xdf
+                       MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3             0xdf
+                       MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4             0xdf
+                       MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5             0xdf
+                       MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6             0xdf
+                       MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7             0xdf
+                       MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE           0x9f
                        MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B         0xc1
                >;
        };
index 8e9d6d5ed7b230656a5b621f855e063aea16dd9b..b6d31499fb431d1fc9a6a35484ad819f71dd134d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
                                         <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MQ_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
+                               assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
+                               assigned-clock-rates = <400000000>;
                                fsl,tuning-start-tap = <20>;
                                fsl,tuning-step = <2>;
                                bus-width = <4>;
index bd937d68ca3bdb99e378c069d8a9b388ec46a516..040b36ef0dd2b7e7b3cd1cb280dcc94606b99e0f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -40,6 +40,7 @@
                pinctrl-0 = <&usb30_host_drv>;
                regulator-name = "vcc_host_5v";
                regulator-always-on;
+               regulator-boot-on;
                vin-supply = <&vcc_sys>;
        };
 
@@ -51,6 +52,7 @@
                pinctrl-0 = <&usb20_host_drv>;
                regulator-name = "vcc_host1_5v";
                regulator-always-on;
+               regulator-boot-on;
                vin-supply = <&vcc_sys>;
        };
 
index 1ee0dc0d9f10ff9641f02bdad4aae75fc225d078..d1cf404b87084a00b18d55b26d50681b68ce48d5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -22,7 +22,7 @@
                backlight = <&backlight>;
                power-supply = <&pp3300_disp>;
 
-               ports {
+               port {
                        panel_in_edp: endpoint {
                                remote-endpoint = <&edp_out_panel>;
                        };
index 81e73103fa788605471679fd1b0868f50822c4f8..15e254a7739120592c1905253ab8ffe4f9c40cc1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -43,7 +43,7 @@
                backlight = <&backlight>;
                power-supply = <&pp3300_disp>;
 
-               ports {
+               port {
                        panel_in_edp: endpoint {
                                remote-endpoint = <&edp_out_panel>;
                        };
index 0b8f1edbd746b5ce0359e60f8279ca11b6633322..b48a63c3efc3d4bc2563e05a53c4d87c0d740232 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -91,7 +91,7 @@
                pinctrl-0 = <&lcd_panel_reset>;
                power-supply = <&vcc3v3_s0>;
 
-               ports {
+               port {
                        panel_in_edp: endpoint {
                                remote-endpoint = <&edp_out_panel>;
                        };
index 7732d0ba4e603210fedb645011488ce5ef31a99e..da3fc7324d6826b7b600c4167c77b1f4bd494cf1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING    KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET     KVM_ARCH_REQ(2)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -208,6 +209,13 @@ struct kvm_cpu_context {
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+struct vcpu_reset_state {
+       unsigned long   pc;
+       unsigned long   r0;
+       bool            be;
+       bool            reset;
+};
+
 struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;
 
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
        /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
        u64 vsesr_el2;
 
+       /* Additional reset state */
+       struct vcpu_reset_state reset_state;
+
        /* True when deferrable sysregs are loaded on the physical CPU,
         * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
        bool sysregs_loaded_on_cpu;
index e1ec947e7c0cabc08837ef37318bf66db5c05d76..0c656850eeeaa9df40f0e7b3a643228ea33ec9d7 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define virt_addr_valid(kaddr)         \
        (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
 
+/*
+ * Given that the GIC architecture permits ITS implementations that can only be
+ * configured with an LPI table address once, GICv3 systems with many CPUs may
+ * end up reserving a lot of different regions after a kexec for their LPI
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
+ * (and thus reserve it persistently with EFI beforehand)
+ */
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
+# define INIT_MEMBLOCK_RESERVED_REGIONS        (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
+#endif
+
 #include <asm-generic/memory_model.h>
 
 #endif
index 4b0e1231625cd6bd54fce6a4cd5ec15b98b73953..d09ec76f08cfcee42e9fcc4a12e8a1760e8ac7bb 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
        arm64_memblock_init();
 
        paging_init();
-       efi_apply_persistent_mem_reservations();
 
        acpi_table_upgrade();
 
index b0b1478094b4c56907dcc1040aac9985f52187e4..421ebf6f708630416a41a975f9b6c2a7d6eff1fe 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
 #include <kvm/arm_psci.h>
 
 #include <asm/cpufeature.h>
+#include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
 
        write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 }
+NOKPROBE_SYMBOL(activate_traps_vhe);
 
 static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 {
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
 }
+NOKPROBE_SYMBOL(deactivate_traps_vhe);
 
 static void __hyp_text __deactivate_traps_nvhe(void)
 {
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
        return exit_code;
 }
+NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
 
 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
              read_sysreg_el2(esr),   read_sysreg_el2(far),
              read_sysreg(hpfar_el2), par, vcpu);
 }
+NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
 
 void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 {
index 68d6f7c3b237dc1713fa36bc55d4a27fef2e5949..b426e2cf973cfe01a90ae40545abb2ee46c66bca 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -18,6 +18,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 
+#include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
 {
        __sysreg_save_common_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
 
 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
        __sysreg_save_common_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
 
 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
 {
        __sysreg_restore_common_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
 
 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
 
 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
index b72a3dd562044584355bc94fce99efb95e694484..f16a5f8ff2b41fa4284da58d1d2caf7af7b46e7b 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 
 /* Maximum phys_shift supported for any VM on this host */
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
  * This function finds the right table above and sets the registers on
  * the virtual CPU struct to their architecturally defined reset
  * values.
+ *
+ * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
+ * ioctl or as part of handling a request issued by another VCPU in the PSCI
+ * handling code.  In the first case, the VCPU will not be loaded, and in the
+ * second case the VCPU will be loaded.  Because this function operates purely
+ * on the memory-backed values of system registers, we want to do a full put if
+ * we were loaded (handling a request) and load the values back at the end of
+ * the function.  Otherwise we leave the state alone.  In both cases, we
+ * disable preemption around the vcpu reset as we would otherwise race with
+ * preempt notifiers which also call put/load.
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
        const struct kvm_regs *cpu_reset;
+       int ret = -EINVAL;
+       bool loaded;
+
+       preempt_disable();
+       loaded = (vcpu->cpu != -1);
+       if (loaded)
+               kvm_arch_vcpu_put(vcpu);
 
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
                        if (!cpu_has_32bit_el1())
-                               return -EINVAL;
+                               goto out;
                        cpu_reset = &default_regs_reset32;
                } else {
                        cpu_reset = &default_regs_reset;
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        /* Reset system registers */
        kvm_reset_sys_regs(vcpu);
 
+       /*
+        * Additional reset state handling that PSCI may have imposed on us.
+        * Must be done after all the sys_reg reset.
+        */
+       if (vcpu->arch.reset_state.reset) {
+               unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+               /* Gracefully handle Thumb2 entry point */
+               if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+                       target_pc &= ~1UL;
+                       vcpu_set_thumb(vcpu);
+               }
+
+               /* Propagate caller endianness */
+               if (vcpu->arch.reset_state.be)
+                       kvm_vcpu_set_be(vcpu);
+
+               *vcpu_pc(vcpu) = target_pc;
+               vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+               vcpu->arch.reset_state.reset = false;
+       }
+
        /* Reset PMU */
        kvm_pmu_vcpu_reset(vcpu);
 
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
 
        /* Reset timer */
-       return kvm_timer_vcpu_reset(vcpu);
+       ret = kvm_timer_vcpu_reset(vcpu);
+out:
+       if (loaded)
+               kvm_arch_vcpu_load(vcpu, smp_processor_id());
+       preempt_enable();
+       return ret;
 }
 
 void kvm_set_ipa_limit(void)
index e3e37228ae4e86f54e88a9fcbd8c8005636c93f5..c936aa40c3f4a0393d03ee66e4b8316c35fa0566 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                return read_zero(vcpu, p);
 }
 
-static bool trap_undef(struct kvm_vcpu *vcpu,
-                      struct sys_reg_params *p,
-                      const struct sys_reg_desc *r)
+/*
+ * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
+ * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
+ * system, these registers should UNDEF. LORID_EL1 being a RO register, we
+ * treat it separately.
+ */
+static bool trap_loregion(struct kvm_vcpu *vcpu,
+                         struct sys_reg_params *p,
+                         const struct sys_reg_desc *r)
 {
-       kvm_inject_undefined(vcpu);
-       return false;
+       u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+       u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
+                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+       if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
+               kvm_inject_undefined(vcpu);
+               return false;
+       }
+
+       if (p->is_write && sr == SYS_LORID_EL1)
+               return write_to_read_only(vcpu, p, r);
+
+       return trap_raz_wi(vcpu, p, r);
 }
 
 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
                if (val & ptrauth_mask)
                        kvm_debug("ptrauth unsupported for guests, suppressing\n");
                val &= ~ptrauth_mask;
-       } else if (id == SYS_ID_AA64MMFR1_EL1) {
-               if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
-                       kvm_debug("LORegions unsupported for guests, suppressing\n");
-
-               val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
        }
 
        return val;
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
 
-       { SYS_DESC(SYS_LORSA_EL1), trap_undef },
-       { SYS_DESC(SYS_LOREA_EL1), trap_undef },
-       { SYS_DESC(SYS_LORN_EL1), trap_undef },
-       { SYS_DESC(SYS_LORC_EL1), trap_undef },
-       { SYS_DESC(SYS_LORID_EL1), trap_undef },
+       { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
+       { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
+       { SYS_DESC(SYS_LORN_EL1), trap_loregion },
+       { SYS_DESC(SYS_LORC_EL1), trap_loregion },
+       { SYS_DESC(SYS_LORID_EL1), trap_loregion },
 
        { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
        { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);
 
-       for (num = 1; num < NR_SYS_REGS; num++)
-               if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
-                       panic("Didn't reset __vcpu_sys_reg(%zi)", num);
+       for (num = 1; num < NR_SYS_REGS; num++) {
+               if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+                        "Didn't reset __vcpu_sys_reg(%zi)\n", num))
+                       break;
+       }
 }
index edfcbb25fd9f6998ae09c53b93b229018de03b89..dcea277c09aea8787325abbe50e18e3c467fc993 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -45,8 +45,8 @@
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
 #define pmd_page(pmd)  (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 #define pte_clear(mm, addr, ptep)      set_pte((ptep), \
-                       (((unsigned int)addr&0x80000000)?__pte(1):__pte(0)))
-#define pte_none(pte)  (!(pte_val(pte)&0xfffffffe))
+       (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+#define pte_none(pte)          (!(pte_val(pte) & ~_PAGE_GLOBAL))
 #define pte_present(pte)       (pte_val(pte) & _PAGE_PRESENT)
 #define pte_pfn(x)     ((unsigned long)((x).pte_low >> PAGE_SHIFT))
 #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 #define pgd_index(address)     ((address) >> PGDIR_SHIFT)
 
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
+
 /*
 * Macro to mark a page protection value as "uncacheable".  Note
  * that "protection" is really a misnomer here as the protection value
index 8f454810514f21be1356bd9d763f28e68f8d70b5..21e0bd5293dde27b0b21a9168b2dc62a1e7085d1 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -49,7 +49,7 @@ struct thread_struct {
 };
 
 #define INIT_THREAD  { \
-       .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
+       .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
        .sr = DEFAULT_PSR_VALUE, \
 }
 
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)          (task_pt_regs(tsk)->usp)
 
 #define task_pt_regs(p) \
-       ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)
+       ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
 #define cpu_relax() barrier()
 
index 659253e9989cb062e84f38c04ce9d4219ebf1d2e..d67f9777cfd9d7a10ca0b439937fd9784f88f8e6 100644
--- a/arch/csky/kernel/dumpstack.c
+++ b/arch/csky/kernel/dumpstack.c
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
                if (task)
                        stack = (unsigned long *)thread_saved_fp(task);
                else
+#ifdef CONFIG_STACKTRACE
+                       asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
+#else
                        stack = (unsigned long *)&stack;
+#endif
        }
 
        show_trace(stack);
index 57f1afe19a52cb7896021a47691fc17c37d0b4bb..f2f12fff36f70c90d260c8052e6ba5feb69b306c 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -8,6 +8,7 @@
 #include <linux/ptrace.h>
 #include <linux/regset.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
 #include <linux/uaccess.h>
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target,
 static const struct user_regset csky_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
-               .n = ELF_NGREG,
+               .n = sizeof(struct pt_regs) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = &gpr_get,
index ddc4dd79f2826f837b3557a239043f7b0aa1c162..b07a534b30627fa93d39edfed3090e57b29bb04e 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
        unsigned long mask = 1 << cpu;
 
-       secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8;
+       secondary_stack =
+               (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
        secondary_hint = mfcr("cr31");
        secondary_ccr  = mfcr("cr18");
 
index cb7c03e5cd218a4236d1cb686a2cc36f0e4f91ff..8473b6bdf51205b35b761dac33f6456d1216072f 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr)
        vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 EXPORT_SYMBOL(iounmap);
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       if (!pfn_valid(pfn)) {
+               vma_prot.pgprot |= _PAGE_SO;
+               return pgprot_noncached(vma_prot);
+       } else if (file->f_flags & O_SYNC) {
+               return pgprot_noncached(vma_prot);
+       }
+
+       return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
index b16710a8a9e7a2ee7d4372fa81635a4c7fdc79a9..76e9bf88d3b9198f6b1e8baaf8aa620bd1755959 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -79,8 +79,6 @@ enum reg_val_type {
        REG_64BIT_32BIT,
        /* 32-bit compatible, need truncation for 64-bit ops. */
        REG_32BIT,
-       /* 32-bit zero extended. */
-       REG_32BIT_ZERO_EX,
        /* 32-bit no sign/zero extension needed. */
        REG_32BIT_POS
 };
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
        const struct bpf_prog *prog = ctx->skf;
        int stack_adjust = ctx->stack_size;
        int store_offset = stack_adjust - 8;
+       enum reg_val_type td;
        int r0 = MIPS_R_V0;
 
-       if (dest_reg == MIPS_R_RA &&
-           get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+       if (dest_reg == MIPS_R_RA) {
                /* Don't let zero extended value escape. */
-               emit_instr(ctx, sll, r0, r0, 0);
+               td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+               if (td == REG_64BIT)
+                       emit_instr(ctx, sll, r0, r0, 0);
+       }
 
        if (ctx->flags & EBPF_SAVE_RA) {
                emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (dst < 0)
                        return dst;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+               if (td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (dst < 0)
                        return dst;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+               if (td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (dst < 0)
                        return dst;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+               if (td == REG_64BIT)
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                if (insn->imm == 1) {
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (src < 0 || dst < 0)
                        return -EINVAL;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+               if (td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
                did_move = false;
                ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
-               if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+               if (ts == REG_64BIT) {
                        int tmp_reg = MIPS_R_AT;
 
                        if (bpf_op == BPF_MOV) {
@@ -1254,8 +1255,7 @@ jeq_common:
                if (insn->imm == 64 && td == REG_32BIT)
                        emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
 
-               if (insn->imm != 64 &&
-                   (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+               if (insn->imm != 64 && td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
index c9bfe526ca9df86c0f5b2aab8cbf063cd32ed81b..d8c8d7c9df1510451367dca149df932b30648720 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud)
 
 static inline int pud_present(pud_t pud)
 {
-       return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
+       return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
 }
 
 extern struct page *pud_page(pud_t pud);
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd)
 
 static inline int pgd_present(pgd_t pgd)
 {
-       return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
+       return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
 }
 
 static inline pte_t pgd_pte(pgd_t pgd)
index 374a19712e2009a0cbcb0c3048d4489cf5c2f9d9..b684f0294f35d93bb5e7367a7d7903719c25c6a6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
                x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+       if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+               return -EINVAL;
+
+       if (value && x86_pmu.limit_period) {
+               if (x86_pmu.limit_period(event, value) > value)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 static struct pmu pmu = {
        .pmu_enable             = x86_pmu_enable,
        .pmu_disable            = x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
        .event_idx              = x86_pmu_event_idx,
        .sched_task             = x86_pmu_sched_task,
        .task_ctx_size          = sizeof(struct x86_perf_task_context),
+       .check_period           = x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
index daafb893449b6e1fb11c7f6b18e71d90da553909..730978dff63f5e577628936e26183b5a36da0cca 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
        intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+       return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
        .cpu_dead               = intel_pmu_cpu_dead,
+
+       .check_period           = intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 
        .guest_get_msrs         = intel_guest_get_msrs,
        .sched_task             = intel_pmu_sched_task,
+
+       .check_period           = intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
index 78d7b7031bfccb8ec2dbcd7de6e54a29fd1365ce..d46fd6754d920b0c5331c2658a55e1685b111957 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -646,6 +646,11 @@ struct x86_pmu {
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+       /*
+        * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+        */
+       int (*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-       return hw_event == bts_event && hwc->sample_period == 1;
+       return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       return intel_pmu_has_bts_period(event, hwc->sample_period);
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
index f65b78d32f5eb7bb0a926c7c4e49b41cb0e66969..7dbbe9ffda173dd25456c1ca70b34a8883a98c60 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
 /*
  * fill in the user structure for a core dump..
  */
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
 {
        u32 fs, gs;
        memset(dump, 0, sizeof(*dump));
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
        fs = get_fs();
        set_fs(KERNEL_DS);
        has_dumped = 1;
+
+       fill_dump(cprm->regs, &dump);
+
        strncpy(dump.u_comm, current->comm, sizeof(current->comm));
        dump.u_ar0 = offsetof(struct user32, regs);
        dump.signal = cprm->siginfo->si_signo;
-       dump_thread32(cprm->regs, &dump);
 
        /*
         * If the size of the dump file exceeds the rlimit, then see
index d9a9993af882ad188b102dbbf436b4be31ddc321..9f15384c504a407818168966610b36d3798a6683 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -52,6 +52,8 @@
 
 #define INTEL_FAM6_CANNONLAKE_MOBILE   0x66
 
+#define INTEL_FAM6_ICELAKE_MOBILE      0x7E
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL                0x1C /* Diamondville, Pineview */
index e652a7cc61863667fad8ff2baa0a7242485600df..3f697a9e3f59b37e4fa37cf0f672114a2d84312b 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -48,7 +48,8 @@ enum {
        BIOS_STATUS_SUCCESS             =  0,
        BIOS_STATUS_UNIMPLEMENTED       = -ENOSYS,
        BIOS_STATUS_EINVAL              = -EINVAL,
-       BIOS_STATUS_UNAVAIL             = -EBUSY
+       BIOS_STATUS_UNAVAIL             = -EBUSY,
+       BIOS_STATUS_ABORT               = -EINTR,
 };
 
 /* Address map parameters */
@@ -167,4 +168,9 @@ extern long system_serial_number;
 
 extern struct kobject *sgi_uv_kobj;    /* /sys/firmware/sgi_uv */
 
+/*
+ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
+ */
+extern struct semaphore __efi_uv_runtime_lock;
+
 #endif /* _ASM_X86_UV_BIOS_H */
index d8ea4ebd79e787050e0adfd861bb3df56aa3ec66..d737a51a53ca368b3a223e2ed41e397a7abbafbd 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
            (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
                return -EINVAL;
 
+       if (!nested_cpu_has_preemption_timer(vmcs12) &&
+           nested_cpu_has_save_preemption_timer(vmcs12))
+               return -EINVAL;
+
        if (nested_cpu_has_ept(vmcs12) &&
            !valid_ept_address(vcpu, vmcs12->ept_pointer))
                return -EINVAL;
@@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
         * secondary cpu-based controls.  Do not include those that
         * depend on CPUID bits, they are added later by vmx_cpuid_update.
         */
-       rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
-               msrs->secondary_ctls_low,
-               msrs->secondary_ctls_high);
+       if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+               rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+                     msrs->secondary_ctls_low,
+                     msrs->secondary_ctls_high);
+
        msrs->secondary_ctls_low = 0;
        msrs->secondary_ctls_high &=
                SECONDARY_EXEC_DESC |
index 95d61804500199934ca167f4bc37892401d43855..30a6bcd735ec36e4163b6e9a72ce26ff3f6b356a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
        if (!entry_only)
                j = find_msr(&m->host, msr);
 
-       if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+       if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
+               (j < 0 &&  m->host.nr == NR_AUTOLOAD_MSRS)) {
                printk_once(KERN_WARNING "Not enough msr switch entries. "
                                "Can't add msr %x\n", msr);
                return;
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
        if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
                return;
 
-       /*
-        * First handle the simple case where no cmpxchg is necessary; just
-        * allow posting non-urgent interrupts.
-        *
-        * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
-        * PI.NDST: pi_post_block will do it for us and the wakeup_handler
-        * expects the VCPU to be on the blocked_vcpu_list that matches
-        * PI.NDST.
-        */
-       if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
-           vcpu->cpu == cpu) {
-               pi_clear_sn(pi_desc);
-               return;
-       }
-
        /* The full case.  */
        do {
                old.control = new.control = pi_desc->control;
@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
                new.sn = 0;
        } while (cmpxchg64(&pi_desc->control, old.control,
                           new.control) != old.control);
+
+       /*
+        * Clear SN before reading the bitmap.  The VT-d firmware
+        * writes the bitmap and reads SN atomically (5.2.3 in the
+        * spec), so it doesn't really have a memory barrier that
+        * pairs with this, but we cannot do that and we need one.
+        */
+       smp_mb__after_atomic();
+
+       if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
+               pi_set_on(pi_desc);
 }
 
 /*
index 99328954c2fc1c43d8573f727b0e9219a2828065..0ac0a64c7790fb772b31d61179480548e6d06082 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
        return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
 }
 
-static inline void pi_clear_sn(struct pi_desc *pi_desc)
+static inline void pi_set_sn(struct pi_desc *pi_desc)
 {
-       return clear_bit(POSTED_INTR_SN,
+       return set_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
 }
 
-static inline void pi_set_sn(struct pi_desc *pi_desc)
+static inline void pi_set_on(struct pi_desc *pi_desc)
 {
-       return set_bit(POSTED_INTR_SN,
-                       (unsigned long *)&pi_desc->control);
+       set_bit(POSTED_INTR_ON,
+               (unsigned long *)&pi_desc->control);
 }
 
 static inline void pi_clear_on(struct pi_desc *pi_desc)
index e67ecf25e69087ff6aa8711001a59b175477a3b1..941f932373d03547f606ea6a20fd6161c48c6398 100644 (file)
@@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * 1) We should set ->mode before checking ->requests.  Please see
         * the comment in kvm_vcpu_exiting_guest_mode().
         *
-        * 2) For APICv, we should set ->mode before checking PIR.ON.  This
+        * 2) For APICv, we should set ->mode before checking PID.ON. This
         * pairs with the memory barrier implicit in pi_test_and_set_on
         * (see vmx_deliver_posted_interrupt).
         *
index 4a6a5a26c58295e4245b09be21a658c3ddae86a2..eb33432f2f241db7475e32e651ae2bc9ca526361 100644 (file)
@@ -29,7 +29,8 @@
 
 struct uv_systab *uv_systab;
 
-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+                       u64 a4, u64 a5)
 {
        struct uv_systab *tab = uv_systab;
        s64 ret;
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
 
        return ret;
 }
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+{
+       s64 ret;
+
+       if (down_interruptible(&__efi_uv_runtime_lock))
+               return BIOS_STATUS_ABORT;
+
+       ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+       up(&__efi_uv_runtime_lock);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(uv_bios_call);
 
 s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
        unsigned long bios_flags;
        s64 ret;
 
+       if (down_interruptible(&__efi_uv_runtime_lock))
+               return BIOS_STATUS_ABORT;
+
        local_irq_save(bios_flags);
-       ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+       ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
        local_irq_restore(bios_flags);
 
+       up(&__efi_uv_runtime_lock);
+
        return ret;
 }
 
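
The patch splits the raw firmware entry out as __uv_bios_call() so that both the plain and the irqsave variants funnel through the same lock, taken outside the irq-disabled region. A userspace analog of that wrapper shape, with a pthread mutex standing in for the interruptible semaphore (names hypothetical):

#include <pthread.h>
#include <stdio.h>

#define FW_STATUS_ABORT	(-4)	/* stand-in for BIOS_STATUS_ABORT */

static pthread_mutex_t fw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Raw call: assumes fw_lock is already held by the caller. */
static long __fw_call(int which, long a1)
{
        return which + a1;	/* placeholder for the real firmware entry */
}

/* Public wrapper: every caller serializes on the same lock. */
static long fw_call(int which, long a1)
{
        long ret;

        if (pthread_mutex_lock(&fw_lock))
                return FW_STATUS_ABORT;	/* like down_interruptible() failing */
        ret = __fw_call(which, a1);
        pthread_mutex_unlock(&fw_lock);
        return ret;
}

int main(void)
{
        printf("%ld\n", fw_call(1, 41));
        return 0;
}

Taking the lock before local_irq_save() matters in the real code because down_interruptible() may sleep, which is not allowed with interrupts disabled.
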
index 17eb09d222ff4e865ccd3c8766d54aad5bfaf34f..ec78a04eb136e8bfd31e9ce3ab2d1b0a7ed29be1 100644 (file)
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
 
 int af_alg_release(struct socket *sock)
 {
-       if (sock->sk)
+       if (sock->sk) {
                sock_put(sock->sk);
+               sock->sk = NULL;
+       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(af_alg_release);
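
Clearing sock->sk right after the final sock_put() turns a repeated release into a no-op instead of a double put. The same idea as a standalone toy:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct sk { int refs; };

static void sk_put(struct sk *s)
{
        assert(s->refs > 0);	/* a double put would trip this */
        s->refs--;
}

struct socket { struct sk *sk; };

/* Safe to call twice: the first call drops the ref and clears the field. */
static int sock_release(struct socket *sock)
{
        if (sock->sk) {
                sk_put(sock->sk);
                sock->sk = NULL;
        }
        return 0;
}

int main(void)
{
        struct sk sk = { .refs = 1 };
        struct socket sock = { .sk = &sk };

        sock_release(&sock);
        sock_release(&sock);	/* no double put */
        printf("refs=%d\n", sk.refs);
        return 0;
}
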
index a43276c76fc688a5e158a0c930c720845fccbfd0..21393ec3b9a4a58fe13037f59be6a3d1466f747e 100644 (file)
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
        struct ht16k33_priv *priv = i2c_get_clientdata(client);
        struct ht16k33_fbdev *fbdev = &priv->fbdev;
 
-       cancel_delayed_work(&fbdev->work);
+       cancel_delayed_work_sync(&fbdev->work);
        unregister_framebuffer(fbdev->info);
        framebuffer_release(fbdev->info);
        free_page((unsigned long) fbdev->buffer);
index f94d33525771bace16dd6ffc3e5fc47e53a28602..d299ec79e4c38bd4b5117b87bd75d639fe596ea1 100644 (file)
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
                   SYSC_QUIRK_LEGACY_IDLE),
        SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
-                  SYSC_QUIRK_LEGACY_IDLE),
+                  0),
        /* Some timers on omap4 and later */
        SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
-                  SYSC_QUIRK_LEGACY_IDLE),
+                  0),
        SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
-                  SYSC_QUIRK_LEGACY_IDLE),
+                  0),
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
                   SYSC_QUIRK_LEGACY_IDLE),
        /* Uarts on omap4 and later */
index 595124074821a93807a893a3336a6c5089ddf795..c364027638e1aeccdb02c76963ad3212a13096bc 100644 (file)
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
        if (IS_ERR(parent))
                return -ENODEV;
 
+       /* Bail out if both clocks point to fck */
+       if (clk_is_match(parent, timer->fclk))
+               return 0;
+
        ret = clk_set_parent(timer->fclk, parent);
        if (ret < 0)
                pr_err("%s: failed to set parent\n", __func__);
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        timer->pdev = pdev;
 
        pm_runtime_enable(dev);
-       pm_runtime_irq_safe(dev);
 
        if (!timer->reserved) {
                ret = pm_runtime_get_sync(dev);
index 4c46ff6f2242d93874bc9af700283eab370cbdb2..55b77c576c428e96c8d10615087470916ede502a 100644 (file)
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 
                early_memunmap(tbl, sizeof(*tbl));
        }
-       return 0;
-}
 
-int __init efi_apply_persistent_mem_reservations(void)
-{
        if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
                unsigned long prsv = efi.mem_reserve;
 
index eee42d5e25eecacbcf379ef9138f73f1e6ffb27b..c037c6c5d0b76505f9993b59a37ed2a993575e9e 100644 (file)
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
        efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
        efi_status_t status;
 
-       if (IS_ENABLED(CONFIG_ARM))
-               return;
-
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
                                (void **)&rsv);
        if (status != EFI_SUCCESS) {
index 8903b9ccfc2b8da6cdc5341fef8619744d904c88..e2abfdb5cee6a2339e73ba6ec753dce1f83b2dbd 100644 (file)
@@ -146,6 +146,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
  */
 static DEFINE_SEMAPHORE(efi_runtime_lock);
 
+/*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
 /*
  * Calls the appropriate efi_runtime_service() with the appropriate
  * arguments.
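
The __alias() above publishes the static efi_runtime_lock under a second linker name for the UV code without widening the symbol's visibility in the header. A minimal illustration of the underlying GCC/Clang attribute (hypothetical names):

#include <stdio.h>

/* The "private" definition, analogous to efi_runtime_lock. */
static int runtime_lock = 1;

/*
 * The alias attribute publishes the same object under a second name,
 * the way __efi_uv_runtime_lock aliases efi_runtime_lock, without
 * making the original symbol global.
 */
extern int uv_runtime_lock __attribute__((alias("runtime_lock")));

int main(void)
{
        uv_runtime_lock = 42;
        printf("%d\n", runtime_lock);	/* prints 42: same storage */
        return 0;
}
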
index ec6e69aa3a8e5ac455c0ede516d96b75fc7a87a8..d2fbb4bb4a4371b5954789f0221c5c65216b7614 100644 (file)
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
        bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
 }
 
+static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
+{
+       i2c_dev->curr_msg = NULL;
+       i2c_dev->num_msgs = 0;
+
+       i2c_dev->msg_buf = NULL;
+       i2c_dev->msg_buf_remaining = 0;
+}
+
 /*
  * Note about I2C_C_CLEAR on error:
  * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
        time_left = wait_for_completion_timeout(&i2c_dev->completion,
                                                adap->timeout);
+
+       bcm2835_i2c_finish_transfer(i2c_dev);
+
        if (!time_left) {
                bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
                                   BCM2835_I2C_C_CLEAR);
index b13605718291619f29e8fc6d21bb513ef513b21e..d917cefc5a19c62882234fa9f88ea382007bf2a3 100644 (file)
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
         * Check for the message size against FIFO depth and set the
         * 'hold bus' bit if it is greater than FIFO depth.
         */
-       if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+       if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
                ctrl_reg |= CDNS_I2C_CR_HOLD;
+       else
+               ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
 
        cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
 
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
         * Check for the message size against FIFO depth and set the
         * 'hold bus' bit if it is greater than FIFO depth.
         */
-       if (id->send_count > CDNS_I2C_FIFO_DEPTH)
+       if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
                ctrl_reg |= CDNS_I2C_CR_HOLD;
+       else
+               ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+
        cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
 
        /* Clear the interrupts in interrupt status register. */
index 4713957b0cbba11e942a01c2b2138854d648637f..a878351f16439859e3931ec71506c705eb9a6f6e 100644 (file)
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
 
 config KEYBOARD_SNVS_PWRKEY
        tristate "IMX SNVS Power Key Driver"
-       depends on SOC_IMX6SX
+       depends on SOC_IMX6SX || SOC_IMX7D
        depends on OF
        help
          This is the snvs powerkey driver for the Freescale i.MX application
index 312916f99597ad5dc6949296edc7b42cd459f9b8..73686c2460ce2e94e9fe757a5ad6e49852fd5170 100644 (file)
@@ -75,9 +75,7 @@
 struct cap11xx_led {
        struct cap11xx_priv *priv;
        struct led_classdev cdev;
-       struct work_struct work;
        u32 reg;
-       enum led_brightness new_brightness;
 };
 #endif
 
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
 }
 
 #ifdef CONFIG_LEDS_CLASS
-static void cap11xx_led_work(struct work_struct *work)
+static int cap11xx_led_set(struct led_classdev *cdev,
+                           enum led_brightness value)
 {
-       struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
+       struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
        struct cap11xx_priv *priv = led->priv;
-       int value = led->new_brightness;
 
        /*
-        * All LEDs share the same duty cycle as this is a HW limitation.
-        * Brightness levels per LED are either 0 (OFF) and 1 (ON).
+        * All LEDs share the same duty cycle as this is a HW
+        * limitation. Brightness levels per LED are either
+        * 0 (OFF) or 1 (ON).
         */
-       regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
-                               BIT(led->reg), value ? BIT(led->reg) : 0);
-}
-
-static void cap11xx_led_set(struct led_classdev *cdev,
-                          enum led_brightness value)
-{
-       struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
-
-       if (led->new_brightness == value)
-               return;
-
-       led->new_brightness = value;
-       schedule_work(&led->work);
+       return regmap_update_bits(priv->regmap,
+                                 CAP11XX_REG_LED_OUTPUT_CONTROL,
+                                 BIT(led->reg),
+                                 value ? BIT(led->reg) : 0);
 }
 
 static int cap11xx_init_leds(struct device *dev,
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
                led->cdev.default_trigger =
                        of_get_property(child, "linux,default-trigger", NULL);
                led->cdev.flags = 0;
-               led->cdev.brightness_set = cap11xx_led_set;
+               led->cdev.brightness_set_blocking = cap11xx_led_set;
                led->cdev.max_brightness = 1;
                led->cdev.brightness = LED_OFF;
 
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
                led->reg = reg;
                led->priv = priv;
 
-               INIT_WORK(&led->work, cap11xx_led_work);
-
                error = devm_led_classdev_register(dev, &led->cdev);
                if (error) {
                        of_node_put(child);
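
The conversion works because brightness_set_blocking callbacks are allowed to sleep: the LED core queues them onto its own workqueue when the caller cannot sleep, so drivers can drop their private work_struct and return the bus-write error directly. A sketch of the resulting driver shape (hypothetical demo names, bus access stubbed out; builds only against kernel headers, not a real device driver):

#include <linux/leds.h>
#include <linux/module.h>

/* Stand-in for the sleeping regmap/i2c write the real driver does. */
static int demo_bus_write(bool on)
{
        return 0;
}

static int demo_led_set(struct led_classdev *cdev, enum led_brightness b)
{
        /*
         * brightness_set_blocking may sleep; the LED core defers it to
         * a workqueue when needed, so no private work_struct is
         * required and errors propagate to the caller.
         */
        return demo_bus_write(b != LED_OFF);
}

static struct led_classdev demo_led = {
        .name                    = "demo:green",
        .max_brightness          = 1,
        .brightness_set_blocking = demo_led_set,
};

static int __init demo_init(void)
{
        return led_classdev_register(NULL, &demo_led);
}

static void __exit demo_exit(void)
{
        led_classdev_unregister(&demo_led);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
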
index 403452ef00e6f257d67ca44bdf5626b0e5cc53a4..3d1cb7bf5e35feba1b355cef7e81c657db97be07 100644 (file)
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
        keypad->stopped = true;
        spin_unlock_irq(&keypad->lock);
 
-       flush_work(&keypad->work.work);
+       flush_delayed_work(&keypad->work);
        /*
         * matrix_keypad_scan() will leave IRQs enabled;
         * we should disable them now.
index 43b86482dda01b51c1d4ff38353756cd75bcfc5b..d466bc07aebb2b3de14fd6994d7bd3c15c6807ee 100644 (file)
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = {
 struct qt2160_led {
        struct qt2160_data *qt2160;
        struct led_classdev cdev;
-       struct work_struct work;
        char name[32];
        int id;
-       enum led_brightness new_brightness;
+       enum led_brightness brightness;
 };
 #endif
 
@@ -74,7 +73,6 @@ struct qt2160_data {
        u16 key_matrix;
 #ifdef CONFIG_LEDS_CLASS
        struct qt2160_led leds[QT2160_NUM_LEDS_X];
-       struct mutex led_lock;
 #endif
 };
 
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
 
 #ifdef CONFIG_LEDS_CLASS
 
-static void qt2160_led_work(struct work_struct *work)
+static int qt2160_led_set(struct led_classdev *cdev,
+                         enum led_brightness value)
 {
-       struct qt2160_led *led = container_of(work, struct qt2160_led, work);
+       struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
        struct qt2160_data *qt2160 = led->qt2160;
        struct i2c_client *client = qt2160->client;
-       int value = led->new_brightness;
        u32 drive, pwmen;
 
-       mutex_lock(&qt2160->led_lock);
-
-       drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
-       pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
-       if (value != LED_OFF) {
-               drive |= (1 << led->id);
-               pwmen |= (1 << led->id);
-
-       } else {
-               drive &= ~(1 << led->id);
-               pwmen &= ~(1 << led->id);
-       }
-       qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
-       qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
+       if (value != led->brightness) {
+               drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
+               pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
+               if (value != LED_OFF) {
+                       drive |= BIT(led->id);
+                       pwmen |= BIT(led->id);
 
-       /*
-        * Changing this register will change the brightness
-        * of every LED in the qt2160. It's a HW limitation.
-        */
-       if (value != LED_OFF)
-               qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
+               } else {
+                       drive &= ~BIT(led->id);
+                       pwmen &= ~BIT(led->id);
+               }
+               qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
+               qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
 
-       mutex_unlock(&qt2160->led_lock);
-}
+               /*
+                * Changing this register will change the brightness
+                * of every LED in the qt2160. It's a HW limitation.
+                */
+               if (value != LED_OFF)
+                       qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
 
-static void qt2160_led_set(struct led_classdev *cdev,
-                          enum led_brightness value)
-{
-       struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
+               led->brightness = value;
+       }
 
-       led->new_brightness = value;
-       schedule_work(&led->work);
+       return 0;
 }
 
 #endif /* CONFIG_LEDS_CLASS */
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
        int ret;
        int i;
 
-       mutex_init(&qt2160->led_lock);
-
        for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
                struct qt2160_led *led = &qt2160->leds[i];
 
                snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
                led->cdev.name = led->name;
-               led->cdev.brightness_set = qt2160_led_set;
+               led->cdev.brightness_set_blocking = qt2160_led_set;
                led->cdev.brightness = LED_OFF;
                led->id = i;
                led->qt2160 = qt2160;
 
-               INIT_WORK(&led->work, qt2160_led_work);
-
                ret = led_classdev_register(&client->dev, &led->cdev);
                if (ret < 0)
                        return ret;
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160)
 {
        int i;
 
-       for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+       for (i = 0; i < QT2160_NUM_LEDS_X; i++)
                led_classdev_unregister(&qt2160->leds[i].cdev);
-               cancel_work_sync(&qt2160->leds[i].work);
-       }
 }
 
 #else
index babcfb165e4f0060c854ee524582b3ec436e8978..3b85631fde9182931a1a5f3a43cf2ec64d52dbb9 100644 (file)
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
 
        input_dev->id.bustype = BUS_HOST;
 
+       keypad_data->input_dev = input_dev;
+
        error = keypad_matrix_key_parse_dt(keypad_data);
        if (error)
                return error;
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
 
        input_set_drvdata(input_dev, keypad_data);
 
-       keypad_data->input_dev = input_dev;
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(keypad_data->base))
index 094bddf56755f11310ee18c6eb5fda33242f57ff..c1e66f45d552a3b64058611bbc8e18c741f6aad9 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/io.h>
 #include <linux/input-polldev.h>
 #include <linux/i2c.h>
-#include <linux/workqueue.h>
 #include <linux/leds.h>
 
 #define APANEL_NAME    "Fujitsu Application Panel"
@@ -59,8 +58,6 @@ struct apanel {
        struct i2c_client *client;
        unsigned short keymap[MAX_PANEL_KEYS];
        u16    nkeys;
-       u16    led_bits;
-       struct work_struct led_work;
        struct led_classdev mail_led;
 };
 
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev)
                        report_key(idev, ap->keymap[i]);
 }
 
-/* Track state changes of LED */
-static void led_update(struct work_struct *work)
-{
-       struct apanel *ap = container_of(work, struct apanel, led_work);
-
-       i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits);
-}
-
-static void mail_led_set(struct led_classdev *led,
+static int mail_led_set(struct led_classdev *led,
                         enum led_brightness value)
 {
        struct apanel *ap = container_of(led, struct apanel, mail_led);
+       u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000;
 
-       if (value != LED_OFF)
-               ap->led_bits |= 0x8000;
-       else
-               ap->led_bits &= ~0x8000;
-
-       schedule_work(&ap->led_work);
+       return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
 }
 
 static int apanel_remove(struct i2c_client *client)
@@ -179,7 +164,7 @@ static struct apanel apanel = {
        },
        .mail_led = {
                .name = "mail:blue",
-               .brightness_set = mail_led_set,
+               .brightness_set_blocking = mail_led_set,
        },
 };
 
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client,
        if (err)
                goto out3;
 
-       INIT_WORK(&ap->led_work, led_update);
        if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
                err = led_classdev_register(&client->dev, &ap->mail_led);
                if (err)
index 1efcfdf9f8a84c89ab0e1a8d5729046473b79ed7..dd9dd4e4082718f43b923ff8ad984b148a15ee29 100644 (file)
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
        idev->close = bma150_irq_close;
        input_set_drvdata(idev, bma150);
 
+       bma150->input = idev;
+
        error = input_register_device(idev);
        if (error) {
                input_free_device(idev);
                return error;
        }
 
-       bma150->input = idev;
        return 0;
 }
 
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
 
        bma150_init_input_device(bma150, ipoll_dev->input);
 
+       bma150->input_polled = ipoll_dev;
+       bma150->input = ipoll_dev->input;
+
        error = input_register_polled_device(ipoll_dev);
        if (error) {
                input_free_polled_device(ipoll_dev);
                return error;
        }
 
-       bma150->input_polled = ipoll_dev;
-       bma150->input = ipoll_dev->input;
-
        return 0;
 }
 
index 55da191ae550743edf6505a01cc3af53404910a9..dbb6d9e1b9471380b50d66af222c89c75d472ac5 100644 (file)
@@ -34,6 +34,7 @@ struct pwm_vibrator {
        struct work_struct play_work;
        u16 level;
        u32 direction_duty_cycle;
+       bool vcc_on;
 };
 
 static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
        struct pwm_state state;
        int err;
 
-       err = regulator_enable(vibrator->vcc);
-       if (err) {
-               dev_err(pdev, "failed to enable regulator: %d", err);
-               return err;
+       if (!vibrator->vcc_on) {
+               err = regulator_enable(vibrator->vcc);
+               if (err) {
+                       dev_err(pdev, "failed to enable regulator: %d", err);
+                       return err;
+               }
+               vibrator->vcc_on = true;
        }
 
        pwm_get_state(vibrator->pwm, &state);
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
 
 static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
 {
-       regulator_disable(vibrator->vcc);
-
        if (vibrator->pwm_dir)
                pwm_disable(vibrator->pwm_dir);
        pwm_disable(vibrator->pwm);
+
+       if (vibrator->vcc_on) {
+               regulator_disable(vibrator->vcc);
+               vibrator->vcc_on = false;
+       }
 }
 
 static void pwm_vibrator_play_work(struct work_struct *work)
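
The new vcc_on flag makes start and stop idempotent, so the regulator's enable count stays balanced even if either side runs twice in a row. The guard pattern, reduced to a standalone refcount toy:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Refcounted resource, like a regulator: enable/disable must balance. */
static int vcc_refcount;
static void vcc_enable(void)  { vcc_refcount++; }
static void vcc_disable(void) { assert(vcc_refcount > 0); vcc_refcount--; }

struct vibrator { bool vcc_on; };

/* start/stop may each run repeatedly; the flag keeps the count balanced. */
static void vib_start(struct vibrator *v)
{
        if (!v->vcc_on) {
                vcc_enable();
                v->vcc_on = true;
        }
}

static void vib_stop(struct vibrator *v)
{
        if (v->vcc_on) {
                vcc_disable();
                v->vcc_on = false;
        }
}

int main(void)
{
        struct vibrator v = { false };

        vib_start(&v);
        vib_start(&v);	/* no double enable */
        vib_stop(&v);
        vib_stop(&v);	/* no unbalanced disable */
        printf("refcount=%d\n", vcc_refcount);	/* 0 */
        return 0;
}
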
index f322a1768fbb5537f4c3523c7a2fcb220277b90a..225ae6980182f778c2987827952afc668afb021f 100644 (file)
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
 static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0000", 0 },
        { "ELAN0100", 0 },
-       { "ELAN0501", 0 },
        { "ELAN0600", 0 },
        { "ELAN0602", 0 },
        { "ELAN0605", 0 },
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN060C", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0617", 0 },
        { "ELAN0618", 0 },
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
index 9fe075c137dc41bb513060d5b69871ed922e493f..a7f8b16145595bd004b5fec8fdf27e2d6cf7f441 100644 (file)
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu CELSIUS H760    0x570f02        40, 14, 0c      3 hw buttons (**)
+ * Fujitsu CELSIUS H780    0x5d0f02        41, 16, 0d      3 hw buttons (**)
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
                },
        },
+       {
+               /* Fujitsu H780 also has a middle button */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
+               },
+       },
 #endif
        { }
 };
index c62cceb97bb15bc9ff172a51757ea88673226bde..5e8d8384aa2a5da4508f789f96a23ddd3cbde425 100644 (file)
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
 {
        struct ps2_gpio_data *drvdata = serio->port_data;
 
+       flush_delayed_work(&drvdata->tx_work);
        disable_irq(drvdata->irq);
 }
 
index d713271ebf7c73f161456195e9f3ffc68db8c495..a64116586b4cccbfbe6b8e26c38209eb7ac5cd10 100644 (file)
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
 
        /* Clear ring flush state */
        timeout = 1000; /* timeout of 1s */
-       writel_relaxed(0x0, ring + RING_CONTROL);
+       writel_relaxed(0x0, ring->regs + RING_CONTROL);
        do {
-               if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+               if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
                      FLUSH_DONE_MASK))
                        break;
                mdelay(1);
index c6a7d4582dc6790be3a50f545faa36bc07ea8a7f..38d9df3fb199fed88a4825b7b392e20c27e85c2a 100644 (file)
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(mbox_flush);
 
 /**
  * mbox_request_channel - Request a mailbox channel.
index 0e4bbdcc614f073c7ec7dbcbb3a83291dbeadbf0..c76892ac4e699cf083c74330c398e36b91b86a8c 100644 (file)
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
        b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
 }
 
-static void b53_enable_vlan(struct b53_device *dev, bool enable)
+static void b53_enable_vlan(struct b53_device *dev, bool enable,
+                           bool enable_filtering)
 {
        u8 mgmt, vc0, vc1, vc4 = 0, vc5;
 
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
                vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
                vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
                vc4 &= ~VC4_ING_VID_CHECK_MASK;
-               vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
-               vc5 |= VC5_DROP_VTABLE_MISS;
+               if (enable_filtering) {
+                       vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+                       vc5 |= VC5_DROP_VTABLE_MISS;
+               } else {
+                       vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+                       vc5 &= ~VC5_DROP_VTABLE_MISS;
+               }
 
                if (is5325(dev))
                        vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
        }
 
        b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+       dev->vlan_enabled = enable;
+       dev->vlan_filtering_enabled = enable_filtering;
 }
 
 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
        b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
 }
 
+static u16 b53_default_pvid(struct b53_device *dev)
+{
+       if (is5325(dev) || is5365(dev))
+               return 1;
+       else
+               return 0;
+}
+
 int b53_configure_vlan(struct dsa_switch *ds)
 {
        struct b53_device *dev = ds->priv;
        struct b53_vlan vl = { 0 };
-       int i;
+       int i, def_vid;
+
+       def_vid = b53_default_pvid(dev);
 
        /* clear all vlan entries */
        if (is5325(dev) || is5365(dev)) {
-               for (i = 1; i < dev->num_vlans; i++)
+               for (i = def_vid; i < dev->num_vlans; i++)
                        b53_set_vlan_entry(dev, i, &vl);
        } else {
                b53_do_vlan_op(dev, VTA_CMD_CLEAR);
        }
 
-       b53_enable_vlan(dev, false);
+       b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
 
        b53_for_each_port(dev, i)
                b53_write16(dev, B53_VLAN_PAGE,
-                           B53_VLAN_PORT_DEF_TAG(i), 1);
+                           B53_VLAN_PORT_DEF_TAG(i), def_vid);
 
        if (!is5325(dev) && !is5365(dev))
                b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
 
 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 {
+       struct b53_device *dev = ds->priv;
+       struct net_device *bridge_dev;
+       unsigned int i;
+       u16 pvid, new_pvid;
+
+       /* Handle the case where multiple bridges span the same switch device
+        * and one of them has a different setting than what is being requested,
+        * which would break filtering semantics for any of the other
+        * bridge devices.
+        */
+       b53_for_each_port(dev, i) {
+               bridge_dev = dsa_to_port(ds, i)->bridge_dev;
+               if (bridge_dev &&
+                   bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
+                   br_vlan_enabled(bridge_dev) != vlan_filtering) {
+                       netdev_err(bridge_dev,
+                                  "VLAN filtering is global to the switch!\n");
+                       return -EINVAL;
+               }
+       }
+
+       b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+       new_pvid = pvid;
+       if (dev->vlan_filtering_enabled && !vlan_filtering) {
+               /* Filtering is currently enabled, use the default PVID since
+                * the bridge does not expect tagging anymore
+                */
+               dev->ports[port].pvid = pvid;
+               new_pvid = b53_default_pvid(dev);
+       } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+               /* Filtering is currently disabled, restore the previous PVID */
+               new_pvid = dev->ports[port].pvid;
+       }
+
+       if (pvid != new_pvid)
+               b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+                           new_pvid);
+
+       b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
+
        return 0;
 }
 EXPORT_SYMBOL(b53_vlan_filtering);
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
        if (vlan->vid_end > dev->num_vlans)
                return -ERANGE;
 
-       b53_enable_vlan(dev, true);
+       b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
 
        return 0;
 }
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
                b53_fast_age_vlan(dev, vid);
        }
 
-       if (pvid) {
+       if (pvid && !dsa_is_cpu_port(ds, port)) {
                b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
                            vlan->vid_end);
                b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
 
                vl->members &= ~BIT(port);
 
-               if (pvid == vid) {
-                       if (is5325(dev) || is5365(dev))
-                               pvid = 1;
-                       else
-                               pvid = 0;
-               }
+               if (pvid == vid)
+                       pvid = b53_default_pvid(dev);
 
                if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
        b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
        dev->ports[port].vlan_ctl_mask = pvlan;
 
-       if (is5325(dev) || is5365(dev))
-               pvid = 1;
-       else
-               pvid = 0;
+       pvid = b53_default_pvid(dev);
 
        /* Make this port join all VLANs without VLAN entries */
        if (is58xx(dev)) {
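
The new filtering hook treats VLAN filtering as chip-global: it scans every port and refuses the change when a different bridge on the same switch disagrees, then saves or restores the per-port PVID around the toggle. The consistency scan, as a standalone toy:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPORTS 5

/* Each port may belong to a bridge; filtering is a per-bridge setting. */
struct bridge { bool vlan_filtering; };
struct port   { struct bridge *br; };

/*
 * Filtering is one global knob on the switch, so changing it for one
 * bridge is refused if another bridge still wants the opposite setting.
 */
static int set_filtering(struct port ports[NPORTS], int port, bool on)
{
        for (int i = 0; i < NPORTS; i++) {
                struct bridge *other = ports[i].br;

                if (other && other != ports[port].br &&
                    other->vlan_filtering != on)
                        return -1;	/* would break the other bridge */
        }
        ports[port].br->vlan_filtering = on;
        return 0;
}

int main(void)
{
        struct bridge br0 = { false }, br1 = { false };
        struct port ports[NPORTS] = {
                { &br0 }, { &br0 }, { &br1 }, { NULL }, { NULL },
        };

        printf("%d\n", set_filtering(ports, 0, true));	/* -1: br1 wants it off */
        return 0;
}
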
index ec796482792d117a962510fe65c911d5144df7a0..4dc7ee38b2580fe8fee4e22e31f505559cd53dd8 100644 (file)
@@ -91,6 +91,7 @@ enum {
 struct b53_port {
        u16             vlan_ctl_mask;
        struct ethtool_eee eee;
+       u16             pvid;
 };
 
 struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
 
        unsigned int num_vlans;
        struct b53_vlan *vlans;
+       bool vlan_enabled;
+       bool vlan_filtering_enabled;
        unsigned int num_ports;
        struct b53_port *ports;
 };
index 98696a88fa1c129cf4cd0bc1184e8f389ee38e66..f91b8e77d5439896f91114d789abef7aafcb52bf 100644 (file)
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
 {
        struct net_device *p = ds->ports[port].cpu_dp->master;
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-       struct ethtool_wolinfo pwol;
+       struct ethtool_wolinfo pwol = { };
 
        /* Get the parent device WoL settings */
-       p->ethtool_ops->get_wol(p, &pwol);
+       if (p->ethtool_ops->get_wol)
+               p->ethtool_ops->get_wol(p, &pwol);
 
        /* Advertise the parent device supported settings */
        wol->supported = pwol.supported;
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
        struct net_device *p = ds->ports[port].cpu_dp->master;
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        s8 cpu_port = ds->ports[port].cpu_dp->index;
-       struct ethtool_wolinfo pwol;
+       struct ethtool_wolinfo pwol = { };
 
-       p->ethtool_ops->get_wol(p, &pwol);
+       if (p->ethtool_ops->get_wol)
+               p->ethtool_ops->get_wol(p, &pwol);
        if (wol->wolopts & ~pwol.supported)
                return -EINVAL;
 
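
Both WoL paths now zero-initialize the result and check the optional get_wol callback before calling it, so a master device without WoL support reads as "nothing supported" rather than stack garbage. The defensive shape in isolation (toy types):

#include <stdio.h>

struct wolinfo { unsigned supported, wolopts; };

struct dev_ops { void (*get_wol)(struct wolinfo *w); };

/* A parent device whose driver did not implement get_wol. */
static const struct dev_ops parent_ops = { .get_wol = NULL };

static void query_parent_wol(const struct dev_ops *ops)
{
        /*
         * Zero-init so an absent callback yields "nothing supported"
         * instead of uninitialized stack contents.
         */
        struct wolinfo pwol = { 0 };

        if (ops->get_wol)
                ops->get_wol(&pwol);

        printf("supported=%#x\n", pwol.supported);
}

int main(void)
{
        query_parent_wol(&parent_ops);
        return 0;
}
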
index a4b6cda380169868d7843fc0bf3af6b53ce57f1b..195a8a87b98411ef06d11a29575962fe0ce03830 100644 (file)
@@ -443,6 +443,18 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
                val = QCA8K_PORT_PAD_RGMII_EN;
                qca8k_write(priv, reg, val);
                break;
+       case PHY_INTERFACE_MODE_RGMII_ID:
+               /* RGMII_ID needs internal delay. This is enabled through
+                * PORT5_PAD_CTRL for all ports, rather than individual port
+                * registers
+                */
+               qca8k_write(priv, reg,
+                           QCA8K_PORT_PAD_RGMII_EN |
+                           QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
+                           QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
+               qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+                           QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+               break;
        case PHY_INTERFACE_MODE_SGMII:
                qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
                break;
index 613fe5c50236c50cfbc6659b5a7d895b21409ec9..d146e54c8a6c615045ff18b31b413fba08365221 100644 (file)
@@ -40,6 +40,7 @@
                                                ((0x8 + (x & 0x3)) << 22)
 #define   QCA8K_PORT_PAD_RGMII_RX_DELAY(x)             \
                                                ((0x10 + (x & 0x3)) << 20)
+#define   QCA8K_MAX_DELAY                              3
 #define   QCA8K_PORT_PAD_RGMII_RX_DELAY_EN             BIT(24)
 #define   QCA8K_PORT_PAD_SGMII_EN                      BIT(7)
 #define QCA8K_REG_MODULE_EN                            0x030
index 28c9b0bdf2f6cff3fc5cfeed3d8ef0beed1dfb60..bc3ac369cbe35571a9f43c7589fff01148fd68a0 100644 (file)
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
 
        priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
        reg = rxchk_readl(priv, RXCHK_CONTROL);
+       /* Clear L2 header checks, which would prevent BPDUs
+        * from being received.
+        */
+       reg &= ~RXCHK_L2_HDR_DIS;
        if (priv->rx_chk_en)
                reg |= RXCHK_EN;
        else
index dc2f58a7c9e5b8bc799d537bec7d819ca3b610eb..8c1497e7d9c5c61c2ec4ec452066b91edb7c4680 100644 (file)
@@ -92,7 +92,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
 
        ptp_qoriq->dev = &pdev->dev;
 
-       err = ptp_qoriq_init(ptp_qoriq, base, enetc_ptp_caps);
+       err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps);
        if (err)
                goto err_no_clock;
 
index b8155f5e71b41aee8c4e3b7e8cab151aac766ffb..ac55db065f167ad58f9ec41966afa7b0299b5f40 100644 (file)
@@ -3128,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
                dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
                dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
        }
+
+       put_device(&pdev->dev);
+
        return 0;
 }
 EXPORT_SYMBOL(hns_dsaf_roce_reset);
index 2f427271a793e4749f4a5f1d1163e738545ed173..292a668ce88e6f75580910eb1997e3b791a27d72 100644 (file)
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 
        ret = mv643xx_eth_shared_of_probe(pdev);
        if (ret)
-               return ret;
+               goto err_put_clk;
        pd = dev_get_platdata(&pdev->dev);
 
        msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
        infer_hw_params(msp);
 
        return 0;
+
+err_put_clk:
+       if (!IS_ERR(msp->clk))
+               clk_disable_unprepare(msp->clk);
+       return ret;
 }
 
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
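
The fix replaces an early return with a goto label so the clock enabled earlier in probe is released on the new failure path. The kernel's unwind idiom, stubbed into a runnable sketch:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for clk_prepare_enable()/clk_disable_unprepare(). */
static int  clk_get_enable(void)  { puts("clk on");  return 0; }
static void clk_put_disable(void) { puts("clk off"); }
static int  of_probe(void)        { return -1; }	/* force the failure path */

static int shared_probe(void)
{
        int ret;

        ret = clk_get_enable();
        if (ret)
                return ret;

        ret = of_probe();
        if (ret)
                goto err_put_clk;	/* undo the clock, don't just return */

        return 0;

err_put_clk:
        clk_put_disable();
        return ret;
}

int main(void)
{
        return shared_probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}
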
index f3a5fa84860f907748e702fb11521ff625d0f340..57727fe1501ee851ec5ca3e4dfe45d54ee5934e9 100644 (file)
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_WORK(&hw->restart_work, sky2_restart);
 
        pci_set_drvdata(pdev, hw);
-       pdev->d3_delay = 200;
+       pdev->d3_delay = 300;
 
        return 0;
 
index 6b88881b8e3585422f2548df3267175bc9d6b16f..c1438ae52a11922a79b281937f5d6bb5a7b4ca3b 100644 (file)
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->addr_len = ETH_ALEN;
        mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
        if (!is_valid_ether_addr(dev->dev_addr)) {
-               en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+               en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
                       priv->port, dev->dev_addr);
                err = -EINVAL;
                goto out;
index bc349d8ca08a9bacb7d640bb60aae0925f586cb2..7ee6d747e97b087ab029a1f6ff9090cfad06a566 100644 (file)
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                bool configure = false;
                bool pfc = false;
+               u16 thres_cells;
+               u16 delay_cells;
                bool lossy;
-               u16 thres;
 
                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
                        if (prio_tc[j] == i) {
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
                        continue;
 
                lossy = !(pfc || pause_en);
-               thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
-               delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
-                                                 pause_en);
-               mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+               thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+               delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+                                                       pfc, pause_en);
+               mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
+                                    thres_cells, lossy);
        }
 
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
index 000009e18ff8c4fa0948423b2ccb764095c872ab..9098ee7fe0d1cd95c1dea044c74722de25a12cf6 100644 (file)
@@ -1869,56 +1869,28 @@ static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigne
 static void ns83820_probe_phy(struct net_device *ndev)
 {
        struct ns83820 *dev = PRIV(ndev);
-       static int first;
-       int i;
-#define MII_PHYIDR1    0x02
-#define MII_PHYIDR2    0x03
-
-#if 0
-       if (!first) {
-               unsigned tmp;
-               ns83820_mii_read_reg(dev, 1, 0x09);
-               ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e);
-
-               tmp = ns83820_mii_read_reg(dev, 1, 0x00);
-               ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000);
-               udelay(1300);
-               ns83820_mii_read_reg(dev, 1, 0x09);
-       }
-#endif
-       first = 1;
-
-       for (i=1; i<2; i++) {
-               int j;
-               unsigned a, b;
-               a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1);
-               b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2);
-
-               //printk("%s: phy %d: 0x%04x 0x%04x\n",
-               //      ndev->name, i, a, b);
-
-               for (j=0; j<0x16; j+=4) {
-                       dprintk("%s: [0x%02x] %04x %04x %04x %04x\n",
-                               ndev->name, j,
-                               ns83820_mii_read_reg(dev, i, 0 + j),
-                               ns83820_mii_read_reg(dev, i, 1 + j),
-                               ns83820_mii_read_reg(dev, i, 2 + j),
-                               ns83820_mii_read_reg(dev, i, 3 + j)
-                               );
-               }
-       }
-       {
-               unsigned a, b;
-               /* read firmware version: memory addr is 0x8402 and 0x8403 */
-               ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
-               ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
-               a = ns83820_mii_read_reg(dev, 1, 0x1d);
-
-               ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
-               ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
-               b = ns83820_mii_read_reg(dev, 1, 0x1d);
-               dprintk("version: 0x%04x 0x%04x\n", a, b);
+       int j;
+       unsigned a, b;
+
+       for (j = 0; j < 0x16; j += 4) {
+               dprintk("%s: [0x%02x] %04x %04x %04x %04x\n",
+                       ndev->name, j,
+                       ns83820_mii_read_reg(dev, 1, 0 + j),
+                       ns83820_mii_read_reg(dev, 1, 1 + j),
+                       ns83820_mii_read_reg(dev, 1, 2 + j),
+                       ns83820_mii_read_reg(dev, 1, 3 + j)
+                       );
        }
+
+       /* read firmware version: memory addr is 0x8402 and 0x8403 */
+       ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+       ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+       a = ns83820_mii_read_reg(dev, 1, 0x1d);
+
+       ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+       ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+       b = ns83820_mii_read_reg(dev, 1, 0x1d);
+       dprintk("version: 0x%04x 0x%04x\n", a, b);
 }
 #endif
 
index beb8e5d6401a99e85667c2b12ad17e4a36968b8c..ded556b7bab5e51d6cbaca834296fa0e66d72881 100644 (file)
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
        eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
 
+       if (!ether_addr_equal(ethh->h_dest,
+                             p_hwfn->p_rdma_info->iwarp.mac_addr)) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_RDMA,
+                          "Got unexpected mac %pM instead of %pM\n",
+                          ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
+               return -EINVAL;
+       }
+
        ether_addr_copy(remote_mac_addr, ethh->h_source);
        ether_addr_copy(local_mac_addr, ethh->h_dest);
 
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
        struct qed_iwarp_info *iwarp_info;
        struct qed_ll2_acquire_data data;
        struct qed_ll2_cbs cbs;
-       u32 mpa_buff_size;
+       u32 buff_size;
        u16 n_ooo_bufs;
        int rc = 0;
        int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 
        memset(&data, 0, sizeof(data));
        data.input.conn_type = QED_LL2_TYPE_IWARP;
-       data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+       data.input.mtu = params->max_mtu;
        data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
        data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
        data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
                goto err;
        }
 
+       buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
        rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
                                         QED_IWARP_LL2_SYN_RX_SIZE,
-                                        QED_IWARP_MAX_SYN_PKT_SIZE,
+                                        buff_size,
                                         iwarp_info->ll2_syn_handle);
        if (rc)
                goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
        if (rc)
                goto err;
 
-       mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
        rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
                                         data.input.rx_num_desc,
-                                        mpa_buff_size,
+                                        buff_size,
                                         iwarp_info->ll2_mpa_handle);
        if (rc)
                goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 
        iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
 
-       iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+       iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
        if (!iwarp_info->mpa_intermediate_buf)
                goto err;
 
index b8f612d002419ea751c098e722e5574b855b8f9a..7ac959038324ef6f74a131f7844d7db1117d4892 100644 (file)
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
 
 #define QED_IWARP_LL2_SYN_TX_SIZE       (128)
 #define QED_IWARP_LL2_SYN_RX_SIZE       (256)
-#define QED_IWARP_MAX_SYN_PKT_SIZE      (128)
 
 #define QED_IWARP_LL2_OOO_DEF_TX_SIZE   (256)
 #define QED_IWARP_MAX_OOO              (16)
index 90045fffd393827a0626fd94435bbc443768757c..7fbb6a4dbf5107723f16b825e7a3b1577c97254a 100644 (file)
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
 static int dwmac4_rx_check_timestamp(void *desc)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
+       unsigned int rdes0 = le32_to_cpu(p->des0);
+       unsigned int rdes1 = le32_to_cpu(p->des1);
+       unsigned int rdes3 = le32_to_cpu(p->des3);
        u32 own, ctxt;
        int ret = 1;
 
-       own = p->des3 & RDES3_OWN;
-       ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+       own = rdes3 & RDES3_OWN;
+       ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
                >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
 
        if (likely(!own && ctxt)) {
-               if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+               if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
                        /* Corrupted value */
                        ret = -EINVAL;
                else
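
The check now converts each descriptor word once through le32_to_cpu() into a local and tests only the locals, which is endian-correct on big-endian CPUs and avoids re-reading DMA-visible memory. A self-contained sketch of the convert-once-then-test idiom (le32_to_cpu32() is a stand-in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Descriptor as the DMA engine writes it: little-endian fields. */
struct dma_desc { uint32_t des0, des1, des2, des3; };

static inline uint32_t le32_to_cpu32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
#else
        return v;
#endif
}

#define RDES3_OWN (1u << 31)

static int check_desc(const struct dma_desc *p)
{
        /* Convert each field exactly once into a CPU-order local... */
        uint32_t rdes0 = le32_to_cpu32(p->des0);
        uint32_t rdes3 = le32_to_cpu32(p->des3);

        /* ...then test only the locals; no repeated raw reads. */
        if (rdes3 & RDES3_OWN)
                return 0;		/* still owned by hardware */
        return rdes0 != 0xffffffff;	/* reject the corrupted pattern */
}

int main(void)
{
        struct dma_desc d = { .des0 = 0x12345678, .des3 = 0 };
        printf("%d\n", check_desc(&d));
        return 0;
}
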
index 5d85742a2be0b62189ce83dbc0de40714f6eb34c..3c749c327cbd4a0305c1833a72c60d6d2e253917 100644 (file)
@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
                                     struct ethtool_eee *edata)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       int ret;
 
-       priv->eee_enabled = edata->eee_enabled;
-
-       if (!priv->eee_enabled)
+       if (!edata->eee_enabled) {
                stmmac_disable_eee_mode(priv);
-       else {
+       } else {
                /* We are asking for enabling the EEE but it is safe
                 * to verify all by invoking the eee_init function.
                 * In case of failure it will return an error.
                 */
-               priv->eee_enabled = stmmac_eee_init(priv);
-               if (!priv->eee_enabled)
+               edata->eee_enabled = stmmac_eee_init(priv);
+               if (!edata->eee_enabled)
                        return -EOPNOTSUPP;
-
-               /* Do not change tx_lpi_timer in case of failure */
-               priv->tx_lpi_timer = edata->tx_lpi_timer;
        }
 
-       return phy_ethtool_set_eee(dev->phydev, edata);
+       ret = phy_ethtool_set_eee(dev->phydev, edata);
+       if (ret)
+               return ret;
+
+       priv->eee_enabled = edata->eee_enabled;
+       priv->tx_lpi_timer = edata->tx_lpi_timer;
+       return 0;
 }
 
 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
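
After the rework, priv->eee_enabled and priv->tx_lpi_timer are only written once phy_ethtool_set_eee() has succeeded, so a failed call can no longer leave driver state out of sync with the PHY. That validate-then-commit shape, reduced to a toy:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct priv { bool eee_enabled; int tx_lpi_timer; };

/* Stand-in for phy_ethtool_set_eee(); pretend disabling is rejected. */
static int phy_set_eee(bool enable)
{
        return enable ? 0 : -EOPNOTSUPP;
}

/* Commit to driver state only after every fallible step has succeeded. */
static int set_eee(struct priv *p, bool enable, int timer)
{
        int ret = phy_set_eee(enable);

        if (ret)
                return ret;	/* p is untouched on failure */

        p->eee_enabled = enable;
        p->tx_lpi_timer = timer;
        return 0;
}

int main(void)
{
        struct priv p = { true, 100 };

        if (set_eee(&p, false, 0))	/* fails: state stays consistent */
                printf("still enabled=%d timer=%d\n",
                       p.eee_enabled, p.tx_lpi_timer);
        return 0;
}
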
index 1f612268c9987796aa35e18c990dfd45fc45628f..d847f672a705f47c6b4bdf579917385b2640da79 100644 (file)
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
                const char *name;
                char node_name[32];
 
-               if (of_property_read_string(node, "label", &name) < 0) {
+               if (of_property_read_string(child, "label", &name) < 0) {
                        snprintf(node_name, sizeof(node_name), "%pOFn", child);
                        name = node_name;
                }
index ebf419dc73073f736c3c1a9f1ac12ede78883749..2d1449345959259c908ee2999d387029b66087f2 100644 (file)
@@ -35,7 +35,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
        u16 val = 0;
        int err;
 
-       err = priv->phy_drv->read_status(phydev);
+       if (priv->phy_drv->read_status)
+               err = priv->phy_drv->read_status(phydev);
+       else
+               err = genphy_read_status(phydev);
        if (err < 0)
                return err;
 
index 735ad838e2ba86ab689c549d4a89e5ca54fecd95..18af2f8eee96a87418dd93005600e661a93a818a 100644 (file)
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 19)},   /* Sierra Wireless MC7710 in QMI mode */
-       {QMI_FIXED_INTF(0x1199, 0x68c0, 8)},    /* Sierra Wireless MC7304/MC7354 */
-       {QMI_FIXED_INTF(0x1199, 0x68c0, 10)},   /* Sierra Wireless MC7304/MC7354 */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
        {QMI_FIXED_INTF(0x1199, 0x9041, 8)},    /* Sierra Wireless MC7305/MC7355 */
index 60dd1ec1665f992ea9b50551672d4a85a5b6b670..ada6baf8847aa87f8980f2aae0deef6a211ed9de 100644 (file)
@@ -557,6 +557,7 @@ enum spd_duplex {
 /* MAC PASSTHRU */
 #define AD_MASK                        0xfee0
 #define BND_MASK               0x0004
+#define BD_MASK                        0x0001
 #define EFUSE                  0xcfdb
 #define PASS_THRU_MASK         0x1
 
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
                        return -ENODEV;
                }
        } else {
-               /* test for RTL8153-BND */
+               /* test for RTL8153-BND and RTL8153-BD */
                ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
-               if ((ocp_data & BND_MASK) == 0) {
+               if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK)) {
                        netif_dbg(tp, probe, tp->netdev,
                                  "Invalid variant for MAC pass through\n");
                        return -ENODEV;
index f66e1b2f0980ca380b73c41ced936659f08890be..3987adaaf2bda1f420775510f264763f3ba61701 100644 (file)
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
        .get_txpower = mt76_get_txpower,
 };
 
-static int mt76x0u_register_device(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
 {
-       struct ieee80211_hw *hw = dev->mt76.hw;
        int err;
 
-       err = mt76u_alloc_queues(&dev->mt76);
-       if (err < 0)
-               goto out_err;
-
-       err = mt76u_mcu_init_rx(&dev->mt76);
-       if (err < 0)
-               goto out_err;
-
        mt76x0_chip_onoff(dev, true, true);
-       if (!mt76x02_wait_for_mac(&dev->mt76)) {
-               err = -ETIMEDOUT;
-               goto out_err;
-       }
+
+       if (!mt76x02_wait_for_mac(&dev->mt76))
+               return -ETIMEDOUT;
 
        err = mt76x0u_mcu_init(dev);
        if (err < 0)
-               goto out_err;
+               return err;
 
        mt76x0_init_usb_dma(dev);
        err = mt76x0_init_hardware(dev);
        if (err < 0)
-               goto out_err;
+               return err;
 
        mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
        mt76_wr(dev, MT_TXOP_CTRL_CFG,
                FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
                FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
 
+       return 0;
+}
+
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
+{
+       struct ieee80211_hw *hw = dev->mt76.hw;
+       int err;
+
+       err = mt76u_alloc_queues(&dev->mt76);
+       if (err < 0)
+               goto out_err;
+
+       err = mt76u_mcu_init_rx(&dev->mt76);
+       if (err < 0)
+               goto out_err;
+
+       err = mt76x0u_init_hardware(dev);
+       if (err < 0)
+               goto out_err;
+
        err = mt76x0_register_device(dev);
        if (err < 0)
                goto out_err;
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
 
        mt76u_stop_queues(&dev->mt76);
        mt76x0u_mac_stop(dev);
+       clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+       mt76x0_chip_onoff(dev, false, false);
        usb_kill_urb(usb->mcu.res.urb);
 
        return 0;
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
        tasklet_enable(&usb->rx_tasklet);
        tasklet_enable(&usb->tx_tasklet);
 
-       ret = mt76x0_init_hardware(dev);
+       ret = mt76x0u_init_hardware(dev);
        if (ret)
                goto err;
 
index 42d3654f77f0e74c8a4071ccaf1d9a566addf093..53775362aac633396993f12b98c837cc06c0651e 100644 (file)
@@ -459,7 +459,7 @@ static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
 }
 
 int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
-                  const struct ptp_clock_info caps)
+                  const struct ptp_clock_info *caps)
 {
        struct device_node *node = ptp_qoriq->dev->of_node;
        struct ptp_qoriq_registers *regs;
@@ -468,7 +468,7 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
        u32 tmr_ctrl;
 
        ptp_qoriq->base = base;
-       ptp_qoriq->caps = caps;
+       ptp_qoriq->caps = *caps;
 
        if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel))
                ptp_qoriq->cksel = DEFAULT_CKSEL;
@@ -605,7 +605,7 @@ static int ptp_qoriq_probe(struct platform_device *dev)
                goto no_ioremap;
        }
 
-       err = ptp_qoriq_init(ptp_qoriq, base, ptp_qoriq_caps);
+       err = ptp_qoriq_init(ptp_qoriq, base, &ptp_qoriq_caps);
        if (err)
                goto no_clock;
 
index aeeb0144bd5595ae3491b96349e505545d35fbe0..8d1acc802a6741ef38efd09c5f5382d413a4b011 100644 (file)
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
                /* Issue Marker IOCB */
                qla2x00_marker(vha, vha->hw->req_q_map[0],
-                   vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+                   vha->hw->rsp_q_map[0], fcport->loop_id, lun,
                    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
        }
 
 done_free_sp:
        sp->free(sp);
-       sp->fcport->flags &= ~FCF_ASYNC_SENT;
+       fcport->flags &= ~FCF_ASYNC_SENT;
 done:
        return rval;
 }
index b2da8a00ec3357af6fd5460bb45016eafe8e389d..5464d467e23ea120aee77659aaa68692634c3603 100644 (file)
@@ -2951,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
        if (rot == 1) {
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
                blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-       } else {
-               blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
-               blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
        if (sdkp->device->type == TYPE_ZBC) {
@@ -3090,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
        if (sdkp->media_present) {
                sd_read_capacity(sdkp, buffer);
 
+               /*
+                * Set the default to rotational. All non-rotational devices
+                * support the block characteristics VPD page, which will
+                * update these flags correctly; any device that doesn't
+                * support it should be treated as rotational.
+                */
+               blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+
                if (scsi_device_supports_vpd(sdp)) {
                        sd_read_block_provisioning(sdkp);
                        sd_read_block_limits(sdkp);
index 24a129fcdd61b38bd8bb5045d22dd4ca0884a59f..a2e5dc7716e21c7cbed163159a0c1262df977227 100644 (file)
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
 
        ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
                             len, iov, 64, VHOST_ACCESS_WO);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        for (i = 0; i < ret; i++) {
index 7cde3f46ad263ab084aafaefa14161902f33d4f1..e996174cbfc026d3d3471cb01a75b437f5b46c1b 100644 (file)
 #include <linux/err.h>
 #include <linux/fs.h>
 
+static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
+static inline char *next_non_spacetab(char *first, const char *last)
+{
+       for (; first <= last; first++)
+               if (!spacetab(*first))
+                       return first;
+       return NULL;
+}
+static inline char *next_terminator(char *first, const char *last)
+{
+       for (; first <= last; first++)
+               if (spacetab(*first) || !*first)
+                       return first;
+       return NULL;
+}
+
 static int load_script(struct linux_binprm *bprm)
 {
        const char *i_arg, *i_name;
-       char *cp;
+       char *cp, *buf_end;
        struct file *file;
        int retval;
 
+       /* Not ours to exec if we don't start with "#!". */
        if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
                return -ENOEXEC;
 
@@ -33,18 +50,40 @@ static int load_script(struct linux_binprm *bprm)
        if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
                return -ENOENT;
 
-       /*
-        * This section does the #! interpretation.
-        * Sorta complicated, but hopefully it will work.  -TYT
-        */
-
+       /* Release since we are not mapping a binary into memory. */
        allow_write_access(bprm->file);
        fput(bprm->file);
        bprm->file = NULL;
 
-       bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
-       if ((cp = strchr(bprm->buf, '\n')) == NULL)
-               cp = bprm->buf+BINPRM_BUF_SIZE-1;
+       /*
+        * This section handles parsing the #! line into separate
+        * interpreter path and argument strings. We must be careful
+        * because bprm->buf is not yet guaranteed to be NUL-terminated
+        * (though the buffer will have trailing NUL padding when the
+        * file size was smaller than the buffer size).
+        *
+        * We do not want to exec a truncated interpreter path, so either
+        * we find a newline (which indicates nothing is truncated), or
+        * we find a space/tab/NUL after the interpreter path (which
+        * itself may be preceded by spaces/tabs). Truncating the
+        * arguments is fine: the interpreter can re-read the script to
+        * parse them on its own.
+        */
+       buf_end = bprm->buf + sizeof(bprm->buf) - 1;
+       cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
+       if (!cp) {
+               cp = next_non_spacetab(bprm->buf + 2, buf_end);
+               if (!cp)
+                       return -ENOEXEC; /* Entire buf is spaces/tabs */
+               /*
+                * If there is no later space/tab/NUL we must assume the
+                * interpreter path is truncated.
+                */
+               if (!next_terminator(cp, buf_end))
+                       return -ENOEXEC;
+               cp = buf_end;
+       }
+       /* NUL-terminate the buffer and any trailing spaces/tabs. */
        *cp = '\0';
        while (cp > bprm->buf) {
                cp--;
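
The parsing rules above are self-contained enough to exercise in userspace.
Below is a minimal sketch of the same decision logic, assuming a hypothetical
8-byte buffer in place of BINPRM_BUF_SIZE and memchr() in place of the
kernel's strnchr(); it illustrates the check, not the kernel code path.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 8			/* stand-in for BINPRM_BUF_SIZE */

static bool spacetab(char c) { return c == ' ' || c == '\t'; }

static char *next_non_spacetab(char *first, const char *last)
{
	for (; first <= last; first++)
		if (!spacetab(*first))
			return first;
	return NULL;
}

static char *next_terminator(char *first, const char *last)
{
	for (; first <= last; first++)
		if (spacetab(*first) || !*first)
			return first;
	return NULL;
}

/* true if the "#!" line can be used, false if the path may be truncated */
static bool shebang_usable(char *buf)
{
	char *buf_end = buf + BUF_SIZE - 1;
	char *cp = memchr(buf, '\n', BUF_SIZE);

	if (cp)
		return true;	/* a newline means nothing was truncated */
	cp = next_non_spacetab(buf + 2, buf_end);
	if (!cp)
		return false;	/* nothing but spaces/tabs after "#!" */
	return next_terminator(cp, buf_end) != NULL;
}

int main(void)
{
	/* exactly BUF_SIZE bytes with no terminator, like bprm->buf */
	char padded[BUF_SIZE] = "#!/bin\0\0";	/* NUL padding: usable */
	char truncated[BUF_SIZE] = "#!/binsh";	/* path hits the end: not */

	printf("%d %d\n", shebang_usable(padded), shebang_usable(truncated));
	return 0;
}

The property being enforced: a missing newline is tolerated only when a
space, tab, or NUL still terminates the interpreter path inside the buffer.
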
index f12cb31a41e5ba5bb1b0bbc7d710cecb11cf4e69..d09c9f878141a5c4213aef1faccb85b979bf4793 100644 (file)
@@ -238,9 +238,9 @@ out:
 }
 
 /* A writeback failed: mark the page as bad, and invalidate the page cache */
-static void nfs_set_pageerror(struct page *page)
+static void nfs_set_pageerror(struct address_space *mapping)
 {
-       nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
+       nfs_zap_mapping(mapping->host, mapping);
 }
 
 /*
@@ -994,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
                nfs_list_remove_request(req);
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
                    (hdr->good_bytes < bytes)) {
-                       nfs_set_pageerror(req->wb_page);
+                       nfs_set_pageerror(page_file_mapping(req->wb_page));
                        nfs_context_set_write_error(req->wb_context, hdr->error);
                        goto remove_req;
                }
@@ -1348,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page,
                unsigned int offset, unsigned int count)
 {
        struct nfs_open_context *ctx = nfs_file_open_context(file);
-       struct inode    *inode = page_file_mapping(page)->host;
+       struct address_space *mapping = page_file_mapping(page);
+       struct inode    *inode = mapping->host;
        int             status = 0;
 
        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -1366,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page,
 
        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
-               nfs_set_pageerror(page);
+               nfs_set_pageerror(mapping);
        else
                __set_page_dirty_nobuffers(page);
 out:
index b33f9785b756edfa0ab597def3ce3827a5057513..72a7681f404699b89d502102a9c290078900647b 100644 (file)
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
        retval = nfsd_idmap_init(net);
        if (retval)
                goto out_idmap_error;
-       nn->nfsd4_lease = 45;   /* default lease time */
-       nn->nfsd4_grace = 45;
+       nn->nfsd4_lease = 90;   /* default lease time */
+       nn->nfsd4_grace = 90;
        nn->somebody_reclaimed = false;
        nn->clverifier_counter = prandom_u32();
        nn->clientid_counter = prandom_u32();
index 4f31f96bbfabd06aad4691366861946fed570bbf..c36c86f1ec9a3ea0d31aba9fed1bf33684ca39bf 100644 (file)
@@ -100,7 +100,7 @@ enum vgic_irq_config {
 };
 
 struct vgic_irq {
-       spinlock_t irq_lock;            /* Protects the content of the struct */
+       raw_spinlock_t irq_lock;        /* Protects the content of the struct */
        struct list_head lpi_list;      /* Used to link all LPIs together */
        struct list_head ap_list;
 
@@ -256,7 +256,7 @@ struct vgic_dist {
        u64                     propbaser;
 
        /* Protects the lpi_list and the count value below. */
-       spinlock_t              lpi_list_lock;
+       raw_spinlock_t          lpi_list_lock;
        struct list_head        lpi_list_head;
        int                     lpi_list_count;
 
@@ -307,7 +307,7 @@ struct vgic_cpu {
        unsigned int used_lrs;
        struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
 
-       spinlock_t ap_list_lock;        /* Protects the ap_list */
+       raw_spinlock_t ap_list_lock;    /* Protects the ap_list */
 
        /*
         * List of IRQs that this VCPU should consider because they are either
index 19f32b0c29af3ded16736b43b33604f910bac3dc..6b318efd8a7428042771b0b83f4e5c5846d46a9d 100644 (file)
@@ -34,6 +34,7 @@
 #ifndef __has_attribute
 # define __has_attribute(x) __GCC4_has_attribute_##x
 # define __GCC4_has_attribute___assume_aligned__      (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___copy__                0
 # define __GCC4_has_attribute___designated_init__     0
 # define __GCC4_has_attribute___externally_visible__  1
 # define __GCC4_has_attribute___noclone__             1
  */
 #define __attribute_const__             __attribute__((__const__))
 
+/*
+ * Optional: only supported since gcc >= 9
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
+ */
+#if __has_attribute(__copy__)
+# define __copy(symbol)                 __attribute__((__copy__(symbol)))
+#else
+# define __copy(symbol)
+#endif
+
 /*
  * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
  * attribute warnings entirely and for good") for more information.
index 45ff763fba76ddad0e0141fca2a5efb2c912b8fa..28604a8d0aa940f862c5cf8a2ce89dd5d6f387a5 100644 (file)
@@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature)
 extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
 
 extern bool efi_is_table_address(unsigned long phys_addr);
-
-extern int efi_apply_persistent_mem_reservations(void);
 #else
 static inline bool efi_enabled(int feature)
 {
@@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
 {
        return false;
 }
-
-static inline int efi_apply_persistent_mem_reservations(void)
-{
-       return 0;
-}
 #endif
 
 extern int efi_status_to_err(efi_status_t status);
index f127adb710418756078c0c677a55ffaba249fe5a..992bf9fa17298ca6d7a8cfda6480a13a01ee876c 100644 (file)
@@ -183,7 +183,7 @@ static inline void qoriq_write_le(unsigned __iomem *addr, u32 val)
 
 irqreturn_t ptp_qoriq_isr(int irq, void *priv);
 int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
-                  const struct ptp_clock_info caps);
+                  const struct ptp_clock_info *caps);
 void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq);
 int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
 int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta);
index 64c41cf455906ab750d042f93410fa0c4c5aded6..859b55b66db2ab5bd6142868e677f2b02bc23ebe 100644 (file)
@@ -29,9 +29,6 @@ extern unsigned long max_pfn;
  */
 extern unsigned long long max_possible_pfn;
 
-#define INIT_MEMBLOCK_REGIONS  128
-#define INIT_PHYSMEM_REGIONS   4
-
 /**
  * enum memblock_flags - definition of memory region attributes
  * @MEMBLOCK_NONE: no special request
index 8fa38d3e75384c1a9c4d7607fefc24276cf174d2..f5bc4c046461544ad366f0af68c261cb34d2a668 100644 (file)
@@ -129,13 +129,13 @@ extern void cleanup_module(void);
 #define module_init(initfn)                                    \
        static inline initcall_t __maybe_unused __inittest(void)                \
        { return initfn; }                                      \
-       int init_module(void) __attribute__((alias(#initfn)));
+       int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
 
 /* This is only required if you want to be unloadable. */
 #define module_exit(exitfn)                                    \
        static inline exitcall_t __maybe_unused __exittest(void)                \
        { return exitfn; }                                      \
-       void cleanup_module(void) __attribute__((alias(#exitfn)));
+       void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
 
 #endif
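
For context on the two __copy() hunks: gcc 9 added -Wmissing-attributes,
which warns when an alias (here init_module()/cleanup_module()) does not
carry the attributes of its target, and the copy attribute forwards them.
A userspace sketch of the same construct, assuming a compiler that provides
__has_attribute; the macro degrades to a no-op exactly as in the header
above, and the names are illustrative:

#include <stdio.h>

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(__copy__)
# define __copy(symbol) __attribute__((__copy__(symbol)))
#else
# define __copy(symbol)
#endif

/* the target carries an attribute the alias would otherwise lack */
static __attribute__((cold)) int my_init(void)
{
	return 0;
}

/* without __copy(my_init), gcc >= 9 warns with -Wmissing-attributes */
int init_entry(void) __copy(my_init) __attribute__((alias("my_init")));

int main(void)
{
	return init_entry();
}
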
 
index 2b2a6dce16301d4d9b683dad7503b383d8708500..4c76fe2c84880fa24bebc7826d19801a10466bfd 100644 (file)
@@ -11,6 +11,8 @@
 #define _LINUX_NETDEV_FEATURES_H
 
 #include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
 
 typedef u64 netdev_features_t;
 
@@ -154,8 +156,26 @@ enum {
 #define NETIF_F_HW_TLS_TX      __NETIF_F(HW_TLS_TX)
 #define NETIF_F_HW_TLS_RX      __NETIF_F(HW_TLS_RX)
 
-#define for_each_netdev_feature(mask_addr, bit)        \
-       for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the highest-numbered set feature bit among the low 'start' bits
+ * of the mask; returns -1 if none is set.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+       /* like BITMAP_LAST_WORD_MASK() for u64:
+        * this clears the (64 - start) most significant bits,
+        * keeping only the low start bits.
+        */
+       feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+       return fls64(feature) - 1;
+}
+
+/* This walks the set feature bits from the MSB down to the LSB;
+ * mask_addr should be a u64 and bit an int.
+ */
+#define for_each_netdev_feature(mask_addr, bit)                                \
+       for ((bit) = find_next_netdev_feature((mask_addr),              \
+                                             NETDEV_FEATURE_COUNT);    \
+            (bit) >= 0;                                                \
+            (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
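
The masking step above is the subtle part. A standalone sketch, with fls64()
modeled by a portable helper (the kernel's is architecture-optimized), shows
that the second argument acts as an exclusive upper bound on the bit index:

#include <stdint.h>
#include <stdio.h>

static int fls64_model(uint64_t x)	/* 1-based index of MSB; 0 if x == 0 */
{
	int i;

	for (i = 63; i >= 0; i--)
		if (x & ((uint64_t)1 << i))
			return i + 1;
	return 0;
}

static int find_highest_set_below(uint64_t feature, unsigned long start)
{
	/* keep only bit indexes 0..start-1, as in the header above */
	feature &= ~0ULL >> (-start & 63);
	return fls64_model(feature) - 1;
}

int main(void)
{
	uint64_t mask = (1ULL << 40) | (1ULL << 15) | (1ULL << 3);

	printf("%d\n", find_highest_set_below(mask, 64));	/* 40 */
	printf("%d\n", find_highest_set_below(mask, 40));	/* 15 */
	printf("%d\n", find_highest_set_below(mask, 15));	/* 3  */
	printf("%d\n", find_highest_set_below(mask, 3));	/* -1 */
	return 0;
}

Unlike the for_each_set_bit() construct it replaces, this never reads the
u64 mask through an unsigned long pointer, which is what broke 32-bit
big-endian systems.
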
index 1d5c551a5add50794e6d0ae38456b98380f800e6..e1a051724f7ef9c89864bfaffe31af328c5ecd68 100644 (file)
@@ -447,6 +447,11 @@ struct pmu {
         * Filter events for PMU-specific reasons.
         */
        int (*filter_match)             (struct perf_event *event); /* optional */
+
+       /*
+        * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+        */
+       int (*check_period)             (struct perf_event *event, u64 value); /* optional */
 };
 
 enum perf_addr_filter_action_t {
index a41e84f7730c8fc17412f372b9214efb0f256325..2069fb90a5596263a7cbc2f67569ede1da4d1a4a 100644 (file)
@@ -2439,7 +2439,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
 
        if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
                skb_set_transport_header(skb, keys.control.thoff);
-       else
+       else if (offset_hint >= 0)
                skb_set_transport_header(skb, offset_hint);
 }
 
@@ -4235,6 +4235,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
 }
 
+static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
+{
+       return skb_is_gso(skb) &&
+              skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
+}
+
 static inline void skb_gso_reset(struct sk_buff *skb)
 {
        skb_shinfo(skb)->gso_size = 0;
index cb462f9ab7dd592bc1d613c86aefeb787cdd9321..71f2394abbf7c08c3215cc90517b6c2a836192cf 100644 (file)
@@ -57,6 +57,15 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
+       } else {
+               /* GSO packets without NEEDS_CSUM do not set the transport
+                * offset. Probe the header and drop the packet if it does
+                * not match one of the above types.
+                */
+               if (gso_type) {
+                       skb_probe_transport_header(skb, -1);
+                       if (!skb_transport_header_was_set(skb))
+                               return -EINVAL;
+               }
        }
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
index d43b145358275d314d02939f674a2ecc719852b2..950ab2f28922e3cbc341700f6d67d645d5185d73 100644 (file)
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
        struct stack_map_irq_work *work;
 
        work = container_of(entry, struct stack_map_irq_work, irq_work);
-       up_read(work->sem);
+       up_read_non_owner(work->sem);
        work->sem = NULL;
 }
 
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
        } else {
                work->sem = &current->mm->mmap_sem;
                irq_work_queue(&work->irq_work);
+               /*
+                * The irq_work will release the mmap_sem with
+                * up_read_non_owner(). The rwsem_release() is called
+                * here to release the lock from lockdep's perspective.
+                */
+               rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
        }
 }
 
index e5ede6918050eeddb27e1aa16361bbe6d6813b53..26d6edab051a1e41c81d23b8c29ab097d6c4c71b 100644 (file)
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
        }
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+       return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
        u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
                return -EINVAL;
 
+       if (perf_event_check_period(event, value))
+               return -EINVAL;
+
        event_function_call(event, __perf_event_period, &value);
 
        return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
        return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+       return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
                pmu->pmu_disable = perf_pmu_nop_void;
        }
 
+       if (!pmu->check_period)
+               pmu->check_period = perf_event_nop_int;
+
        if (!pmu->event_idx)
                pmu->event_idx = perf_event_idx_default;
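
The registration fallback above follows perf's usual pattern for optional
ops, visible in the perf_pmu_nop_*() helpers nearby: a NULL callback is
filled with a nop default at registration time, so call sites like
perf_event_period() never need a NULL check. The pattern reduced to a
sketch, with illustrative names:

#include <stdio.h>

struct toy_pmu {
	/* returns nonzero to reject the new period */
	int (*check_period)(unsigned long long value);
};

static int nop_check_period(unsigned long long value)
{
	(void)value;
	return 0;			/* accept anything by default */
}

static void toy_pmu_register(struct toy_pmu *pmu)
{
	if (!pmu->check_period)
		pmu->check_period = nop_check_period;
}

int main(void)
{
	struct toy_pmu pmu = { 0 };

	toy_pmu_register(&pmu);
	/* unconditional call, mirroring perf_event_check_period() */
	printf("%d\n", pmu.check_period(4096));
	return 0;
}
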
 
index 309ef5a64af5d03daa402749ea12fc12385ec4be..5ab4fe3b1dcc0b6a90bc2b4a5eb0e02a44ee909b 100644 (file)
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);
 
-       if (order_base_2(size) >= MAX_ORDER)
+       if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
                goto fail;
 
        rb = kzalloc(size, GFP_KERNEL);
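
The old guard compared an order computed from a size in bytes against
MAX_ORDER, which counts pages, so it tripped far below the real kmalloc
limit. A quick check of the arithmetic, assuming the common PAGE_SHIFT = 12
and MAX_ORDER = 11 of that era:

#include <stdio.h>

static int order_base_2(unsigned long n)	/* ceil(log2(n)) */
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	const int page_shift = 12, max_order = 11;	/* assumed values */
	unsigned long size = 4096 * sizeof(void *);	/* 4096-page ring: 32 KiB of pointers */

	/* old check: 15 >= 11, rejecting a perfectly allocatable 32 KiB */
	printf("old: %d\n", order_base_2(size) >= max_order);
	/* new check: 15 >= 23, so only requests past the kmalloc range fail */
	printf("new: %d\n", order_base_2(size) >= page_shift + max_order);
	return 0;
}
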
index c521b7347482509e3d862f7bffbcf095b8177fd8..c4238b441624415cfd6113bb36ea5d71b6eaaa54 100644 (file)
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
        const char tgid_space[] = "          ";
        const char space[] = "  ";
 
+       print_event_info(buf, m);
+
        seq_printf(m, "#                          %s  _-----=> irqs-off\n",
                   tgid ? tgid_space : space);
        seq_printf(m, "#                          %s / _----=> need-resched\n",
index d5fb09ebba8b79da9e82bc6d23fc48e8d1a02274..9eaf07f99212f797df29fb5f0b93f475c48f441f 100644 (file)
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
 static nokprobe_inline int
 fetch_store_strlen(unsigned long addr)
 {
-       mm_segment_t old_fs;
        int ret, len = 0;
        u8 c;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       pagefault_disable();
-
        do {
-               ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+               ret = probe_mem_read(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);
 
-       pagefault_enable();
-       set_fs(old_fs);
-
        return (ret < 0) ? ret : len;
 }
 
index 45b1d67a176710c8b4e4081331c5e9dfa1c9a30e..4a20455d1f61e36afabf8befadb15dfc0f42a3a2 100644 (file)
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 EXPORT_SYMBOL(crc32_le);
 EXPORT_SYMBOL(__crc32c_le);
 
-u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
-u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
 
 /*
  * This multiplies the polynomials x and y modulo the given modulus.
index 022d4cbb3618bb3886cec62e69912b4e9a5e8067..ea31045ba70423d128575ab7e9e182a40bea425f 100644 (file)
 
 #include "internal.h"
 
+#define INIT_MEMBLOCK_REGIONS                  128
+#define INIT_PHYSMEM_REGIONS                   4
+
+#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
+# define INIT_MEMBLOCK_RESERVED_REGIONS                INIT_MEMBLOCK_REGIONS
+#endif
+
 /**
  * DOC: memblock overview
  *
@@ -92,7 +99,7 @@ unsigned long max_pfn;
 unsigned long long max_possible_pfn;
 
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
 #endif
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = {
 
        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
-       .reserved.max           = INIT_MEMBLOCK_REGIONS,
+       .reserved.max           = INIT_MEMBLOCK_RESERVED_REGIONS,
        .reserved.name          = "reserved",
 
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
index 46285d28e43b529b333dbd8c3b3cf0123df66417..7f79b78bc8294557480a6df4fdb328f757895a7d 100644 (file)
@@ -4675,11 +4675,11 @@ refill:
                /* Even if we own the page, we do not use atomic_set().
                 * This would break get_page_unless_zero() users.
                 */
-               page_ref_add(page, size);
+               page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
 
                /* reset page count bias and offset to start of new frag */
                nc->pfmemalloc = page_is_pfmemalloc(page);
-               nc->pagecnt_bias = size + 1;
+               nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                nc->offset = size;
        }
 
@@ -4695,10 +4695,10 @@ refill:
                size = nc->size;
 #endif
                /* OK, page count is 0, we can safely set it */
-               set_page_count(page, size + 1);
+               set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
                /* reset page count bias and offset to start of new frag */
-               nc->pagecnt_bias = size + 1;
+               nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                offset = size - fragsz;
        }
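
Both hunks switch the up-front reference count and the bias to a fixed
constant so the two sides always balance: the fastpath pays references out
of a local bias, and the atomic refcount is only reconciled when the page is
drained. A toy model of the bias trick, with plain ints standing in for
struct page and its atomic refcount:

#include <stdio.h>

#define FRAG_REFS 8	/* stand-in for PAGE_FRAG_CACHE_MAX_SIZE */

struct toy_cache {
	int refcount;	/* models the page's atomic _refcount */
	int bias;	/* references still owned by the cache */
	int offset;
};

static void toy_refill(struct toy_cache *nc, int size)
{
	nc->refcount = 1 + FRAG_REFS;	/* alloc ref + page_ref_add() */
	nc->bias = FRAG_REFS + 1;
	nc->offset = size;
}

/* bump allocator: no atomic ops, just the local bias */
static int toy_alloc(struct toy_cache *nc, int fragsz)
{
	if (nc->offset < fragsz)
		return -1;	/* the real code refills here */
	nc->bias--;
	nc->offset -= fragsz;
	return nc->offset;
}

int main(void)
{
	struct toy_cache nc;
	int handed_out = 0;

	toy_refill(&nc, 32);
	while (toy_alloc(&nc, 8) >= 0)
		handed_out++;

	/* invariant: refcount - bias == fragments handed out */
	printf("%d == %d\n", nc.refcount - nc.bias, handed_out);
	return 0;
}

In the full function the recycling test compares the page's refcount against
the unspent bias, which is why both must be derived from the same constant
rather than from the variable page size.
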
 
index ecbe419e05ab5967d821534130348a95726f5eb6..a3d13f5e2bfc3e1c6353ba9cb4fa8f4e4404ba8e 100644 (file)
@@ -8215,7 +8215,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
        netdev_features_t feature;
        int feature_bit;
 
-       for_each_netdev_feature(&upper_disables, feature_bit) {
+       for_each_netdev_feature(upper_disables, feature_bit) {
                feature = __NETIF_F_BIT(feature_bit);
                if (!(upper->wanted_features & feature)
                    && (features & feature)) {
@@ -8235,7 +8235,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
        netdev_features_t feature;
        int feature_bit;
 
-       for_each_netdev_feature(&upper_disables, feature_bit) {
+       for_each_netdev_feature(upper_disables, feature_bit) {
                feature = __NETIF_F_BIT(feature_bit);
                if (!(features & feature) && (lower->features & feature)) {
                        netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
index b584cb42a8037301e81ba3eea5cada8c18ad82bb..5132c054c981faa9f345aaf8426f5378d37324aa 100644 (file)
@@ -2804,8 +2804,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
        u32 off = skb_mac_header_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_cow(skb, len_diff);
@@ -2846,8 +2845,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
        u32 off = skb_mac_header_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2972,8 +2970,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
        u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_cow(skb, len_diff);
@@ -3002,8 +2999,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
        u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_unclone(skb, GFP_ATOMIC);
index 26d8484849126ea0a418b6290dcdcba30bf44c2f..2415d9cb9b89fefb30a7932a70c3497aeb67c80e 100644 (file)
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
+       fragsz = SKB_DATA_ALIGN(fragsz);
+
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 
 void *napi_alloc_frag(unsigned int fragsz)
 {
+       fragsz = SKB_DATA_ALIGN(fragsz);
+
        return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(napi_alloc_frag);
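
Both wrappers now round the request up before it reaches the shared frag
cache. SKB_DATA_ALIGN() rounds up to the cacheline size; since the cache is
carved downward from a page-aligned end, aligned sizes keep every returned
pointer cacheline-aligned. The rounding itself, assuming SMP_CACHE_BYTES is
64 (it is arch-dependent in the kernel):

#include <stdio.h>

#define SMP_CACHE_BYTES 64	/* assumed value */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int fragsz = 100;

	/* 100 -> 128: the next fragment starts on its own cacheline */
	printf("%u\n", ALIGN_UP(fragsz, SMP_CACHE_BYTES));
	return 0;
}
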
index d14226ecfde4dbd9b37387c49bcaa57e9e323847..bd61633d2c32a52810f8cec611f31f6cf95da9d6 100644 (file)
@@ -27,6 +27,7 @@
 #include <net/6lowpan.h>
 #include <net/ipv6_frag.h>
 #include <net/inet_frag.h>
+#include <net/ip.h>
 
 #include "6lowpan_i.h"
 
@@ -34,8 +35,8 @@ static const char lowpan_frags_cache_name[] = "lowpan-frags";
 
 static struct inet_frags lowpan_frags;
 
-static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
-                            struct sk_buff *prev, struct net_device *ldev);
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
+                            struct sk_buff *prev,  struct net_device *ldev);
 
 static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
 {
@@ -88,9 +89,15 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
 static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
                             struct sk_buff *skb, u8 frag_type)
 {
-       struct sk_buff *prev, *next;
+       struct sk_buff *prev_tail;
        struct net_device *ldev;
-       int end, offset;
+       int end, offset, err;
+
+       /* inet_frag_queue_* functions use skb->cb; see struct ipfrag_skb_cb
+        * in inet_fragment.c
+        */
+       BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet_skb_parm));
+       BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet6_skb_parm));
 
        if (fq->q.flags & INET_FRAG_COMPLETE)
                goto err;
@@ -117,38 +124,15 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
                }
        }
 
-       /* Find out which fragments are in front and at the back of us
-        * in the chain of fragments so far.  We must know where to put
-        * this fragment, right?
-        */
-       prev = fq->q.fragments_tail;
-       if (!prev ||
-           lowpan_802154_cb(prev)->d_offset <
-           lowpan_802154_cb(skb)->d_offset) {
-               next = NULL;
-               goto found;
-       }
-       prev = NULL;
-       for (next = fq->q.fragments; next != NULL; next = next->next) {
-               if (lowpan_802154_cb(next)->d_offset >=
-                   lowpan_802154_cb(skb)->d_offset)
-                       break;  /* bingo! */
-               prev = next;
-       }
-
-found:
-       /* Insert this fragment in the chain of fragments. */
-       skb->next = next;
-       if (!next)
-               fq->q.fragments_tail = skb;
-       if (prev)
-               prev->next = skb;
-       else
-               fq->q.fragments = skb;
-
        ldev = skb->dev;
        if (ldev)
                skb->dev = NULL;
+       barrier();
+
+       prev_tail = fq->q.fragments_tail;
+       err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+       if (err)
+               goto err;
 
        fq->q.stamp = skb->tstamp;
        if (frag_type == LOWPAN_DISPATCH_FRAG1)
@@ -163,10 +147,11 @@ found:
                unsigned long orefdst = skb->_skb_refdst;
 
                skb->_skb_refdst = 0UL;
-               res = lowpan_frag_reasm(fq, prev, ldev);
+               res = lowpan_frag_reasm(fq, skb, prev_tail, ldev);
                skb->_skb_refdst = orefdst;
                return res;
        }
+       skb_dst_drop(skb);
 
        return -1;
 err:
@@ -175,97 +160,29 @@ err:
 }
 
 /*     Check if this packet is complete.
- *     Returns NULL on failure by any reason, and pointer
- *     to current nexthdr field in reassembled frame.
  *
  *     It is called with locked fq, and caller must check that
  *     queue is eligible for reassembly i.e. it is not COMPLETE,
  *     the last and the first frames arrived and all the bits are here.
  */
-static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
-                            struct net_device *ldev)
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
+                            struct sk_buff *prev_tail, struct net_device *ldev)
 {
-       struct sk_buff *fp, *head = fq->q.fragments;
-       int sum_truesize;
+       void *reasm_data;
 
        inet_frag_kill(&fq->q);
 
-       /* Make the one we just received the head. */
-       if (prev) {
-               head = prev->next;
-               fp = skb_clone(head, GFP_ATOMIC);
-
-               if (!fp)
-                       goto out_oom;
-
-               fp->next = head->next;
-               if (!fp->next)
-                       fq->q.fragments_tail = fp;
-               prev->next = fp;
-
-               skb_morph(head, fq->q.fragments);
-               head->next = fq->q.fragments->next;
-
-               consume_skb(fq->q.fragments);
-               fq->q.fragments = head;
-       }
-
-       /* Head of list must not be cloned. */
-       if (skb_unclone(head, GFP_ATOMIC))
+       reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+       if (!reasm_data)
                goto out_oom;
+       inet_frag_reasm_finish(&fq->q, skb, reasm_data);
 
-       /* If the first fragment is fragmented itself, we split
-        * it to two chunks: the first with data and paged part
-        * and the second, holding only fragments.
-        */
-       if (skb_has_frag_list(head)) {
-               struct sk_buff *clone;
-               int i, plen = 0;
-
-               clone = alloc_skb(0, GFP_ATOMIC);
-               if (!clone)
-                       goto out_oom;
-               clone->next = head->next;
-               head->next = clone;
-               skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-               skb_frag_list_init(head);
-               for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-                       plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-               clone->len = head->data_len - plen;
-               clone->data_len = clone->len;
-               head->data_len -= clone->len;
-               head->len -= clone->len;
-               add_frag_mem_limit(fq->q.net, clone->truesize);
-       }
-
-       WARN_ON(head == NULL);
-
-       sum_truesize = head->truesize;
-       for (fp = head->next; fp;) {
-               bool headstolen;
-               int delta;
-               struct sk_buff *next = fp->next;
-
-               sum_truesize += fp->truesize;
-               if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-                       kfree_skb_partial(fp, headstolen);
-               } else {
-                       if (!skb_shinfo(head)->frag_list)
-                               skb_shinfo(head)->frag_list = fp;
-                       head->data_len += fp->len;
-                       head->len += fp->len;
-                       head->truesize += fp->truesize;
-               }
-               fp = next;
-       }
-       sub_frag_mem_limit(fq->q.net, sum_truesize);
-
-       skb_mark_not_on_list(head);
-       head->dev = ldev;
-       head->tstamp = fq->q.stamp;
-
+       skb->dev = ldev;
+       skb->tstamp = fq->q.stamp;
        fq->q.fragments = NULL;
+       fq->q.rb_fragments = RB_ROOT;
        fq->q.fragments_tail = NULL;
+       fq->q.last_run_head = NULL;
 
        return 1;
 out_oom:
index 0dfb72c4667129e878c4a6086abeea35936bd14f..eab3ebde981e78a6a0a4852c3b4374c02ede1187 100644 (file)
@@ -1385,6 +1385,15 @@ out:
 }
 EXPORT_SYMBOL(inet_gso_segment);
 
+static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
+                                       netdev_features_t features)
+{
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
+               return ERR_PTR(-EINVAL);
+
+       return inet_gso_segment(skb, features);
+}
+
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *,
                                                           struct sk_buff *));
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
@@ -1861,7 +1870,7 @@ static struct packet_offload ip_packet_offload __read_mostly = {
 
 static const struct net_offload ipip_offload = {
        .callbacks = {
-               .gso_segment    = inet_gso_segment,
+               .gso_segment    = ipip_gso_segment,
                .gro_receive    = ipip_gro_receive,
                .gro_complete   = ipip_gro_complete,
        },
index cab6b2f2f61dd30c1052e48d4597d93f9378eb51..769508c75dce5a39bd78766d801ca01abb77f4bb 100644 (file)
@@ -2546,6 +2546,7 @@ void tcp_write_queue_purge(struct sock *sk)
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
        tcp_sk(sk)->packets_out = 0;
+       inet_csk(sk)->icsk_backoff = 0;
 }
 
 int tcp_disconnect(struct sock *sk, int flags)
@@ -2596,6 +2597,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        if (tp->write_seq == 0)
                tp->write_seq = 1;
        icsk->icsk_backoff = 0;
+       tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
index 662b034f17952238b8081c5bcffee2a175af7786..4010ae3644f3101d01ee984c21256ca381588a3e 100644 (file)
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                if (sock_owned_by_user(sk))
                        break;
 
+               skb = tcp_rtx_queue_head(sk);
+               if (WARN_ON_ONCE(!skb))
+                       break;
+
                icsk->icsk_backoff--;
                icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
                                               TCP_TIMEOUT_INIT;
                icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
-               skb = tcp_rtx_queue_head(sk);
 
                tcp_mstamp_refresh(tp);
                delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
index 65a4f96dc462909652fe7065f172f0b2d7ef8f34..bb525abd860e0488cb40365b310f2428d0a7a8a1 100644 (file)
@@ -1719,6 +1719,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
        return 0;
 }
 
+static void ip6erspan_set_version(struct nlattr *data[],
+                                 struct __ip6_tnl_parm *parms)
+{
+       parms->erspan_ver = 1;
+       if (data[IFLA_GRE_ERSPAN_VER])
+               parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+       if (parms->erspan_ver == 1) {
+               if (data[IFLA_GRE_ERSPAN_INDEX])
+                       parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+       } else if (parms->erspan_ver == 2) {
+               if (data[IFLA_GRE_ERSPAN_DIR])
+                       parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+               if (data[IFLA_GRE_ERSPAN_HWID])
+                       parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+       }
+}
+
 static void ip6gre_netlink_parms(struct nlattr *data[],
                                struct __ip6_tnl_parm *parms)
 {
@@ -1767,20 +1785,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 
        if (data[IFLA_GRE_COLLECT_METADATA])
                parms->collect_md = true;
-
-       parms->erspan_ver = 1;
-       if (data[IFLA_GRE_ERSPAN_VER])
-               parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
-
-       if (parms->erspan_ver == 1) {
-               if (data[IFLA_GRE_ERSPAN_INDEX])
-                       parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
-       } else if (parms->erspan_ver == 2) {
-               if (data[IFLA_GRE_ERSPAN_DIR])
-                       parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
-               if (data[IFLA_GRE_ERSPAN_HWID])
-                       parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
-       }
 }
 
 static int ip6gre_tap_init(struct net_device *dev)
@@ -2203,6 +2207,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
        int err;
 
        ip6gre_netlink_parms(data, &nt->parms);
+       ip6erspan_set_version(data, &nt->parms);
        ign = net_generic(net, ip6gre_net_id);
 
        if (nt->parms.collect_md) {
@@ -2248,6 +2253,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (IS_ERR(t))
                return PTR_ERR(t);
 
+       ip6erspan_set_version(data, &p);
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
index 5c045691c30203d3f8d2d12957e283bc01cae259..345882d9c0618e6b9f6cb0c5b41f3e5abde8d1d3 100644 (file)
@@ -383,9 +383,36 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
        },
 };
 
+static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
+                                      netdev_features_t features)
+{
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
+               return ERR_PTR(-EINVAL);
+
+       return ipv6_gso_segment(skb, features);
+}
+
+static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
+                                         netdev_features_t features)
+{
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
+               return ERR_PTR(-EINVAL);
+
+       return inet_gso_segment(skb, features);
+}
+
+static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
+                                         netdev_features_t features)
+{
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
+               return ERR_PTR(-EINVAL);
+
+       return ipv6_gso_segment(skb, features);
+}
+
 static const struct net_offload sit_offload = {
        .callbacks = {
-               .gso_segment    = ipv6_gso_segment,
+               .gso_segment    = sit_gso_segment,
                .gro_receive    = sit_ip6ip6_gro_receive,
                .gro_complete   = sit_gro_complete,
        },
@@ -393,7 +420,7 @@ static const struct net_offload sit_offload = {
 
 static const struct net_offload ip4ip6_offload = {
        .callbacks = {
-               .gso_segment    = inet_gso_segment,
+               .gso_segment    = ip4ip6_gso_segment,
                .gro_receive    = ip4ip6_gro_receive,
                .gro_complete   = ip4ip6_gro_complete,
        },
@@ -401,7 +428,7 @@ static const struct net_offload ip4ip6_offload = {
 
 static const struct net_offload ip6ip6_offload = {
        .callbacks = {
-               .gso_segment    = ipv6_gso_segment,
+               .gso_segment    = ip6ip6_gso_segment,
                .gro_receive    = sit_ip6ip6_gro_receive,
                .gro_complete   = ip6ip6_gro_complete,
        },
index d65aa019ce85cb0d14b398b6c3566b02d9ddda2a..09dd1c2860fc030f584fb895ca12483a13592823 100644 (file)
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
                      BSS_CHANGED_P2P_PS |
                      BSS_CHANGED_TXPOWER;
        int err;
+       int prev_beacon_int;
 
        old = sdata_dereference(sdata->u.ap.beacon, sdata);
        if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 
        sdata->needed_rx_chains = sdata->local->rx_chains;
 
+       prev_beacon_int = sdata->vif.bss_conf.beacon_int;
        sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
        if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        if (!err)
                ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
        mutex_unlock(&local->mtx);
-       if (err)
+       if (err) {
+               sdata->vif.bss_conf.beacon_int = prev_beacon_int;
                return err;
+       }
 
        /*
         * Apply control port protocol, this allows us to
index 8b26858ab4d5dfc9c3e6e2610266d03ac29a41f1..574c3891c4b2e8b6d7c6031811acdb50b94355be 100644 (file)
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
  * @dst: mesh path destination mac address
  * @mpp: mesh proxy mac address
  * @rhash: rhashtable list pointer
+ * @walk_list: linked list containing all mesh_path objects.
  * @gate_list: list pointer for known gates list
  * @sdata: mesh subif
  * @next_hop: mesh neighbor to which frames for this destination will be
@@ -106,6 +107,7 @@ struct mesh_path {
        u8 dst[ETH_ALEN];
        u8 mpp[ETH_ALEN];       /* used for MPP or MAP */
        struct rhash_head rhash;
+       struct hlist_node walk_list;
        struct hlist_node gate_list;
        struct ieee80211_sub_if_data *sdata;
        struct sta_info __rcu *next_hop;
@@ -135,12 +137,16 @@ struct mesh_path {
  * gate's mpath may or may not be resolved and active.
  * @gates_lock: protects updates to known_gates
  * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
  * @entries: number of entries in the table
  */
 struct mesh_table {
        struct hlist_head known_gates;
        spinlock_t gates_lock;
        struct rhashtable rhead;
+       struct hlist_head walk_head;
+       spinlock_t walk_lock;
        atomic_t entries;               /* Up to MAX_MESH_NEIGHBOURS */
 };
 
index a5125624a76dce205b28e916a505faae765539ae..88a6d5e18ccc9e13d20af8736f82ed6fa42a8ce5 100644 (file)
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
                return NULL;
 
        INIT_HLIST_HEAD(&newtbl->known_gates);
+       INIT_HLIST_HEAD(&newtbl->walk_head);
        atomic_set(&newtbl->entries,  0);
        spin_lock_init(&newtbl->gates_lock);
+       spin_lock_init(&newtbl->walk_lock);
 
        return newtbl;
 }
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 static struct mesh_path *
 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 {
-       int i = 0, ret;
-       struct mesh_path *mpath = NULL;
-       struct rhashtable_iter iter;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return NULL;
-
-       rhashtable_walk_start(&iter);
+       int i = 0;
+       struct mesh_path *mpath;
 
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
                if (i++ == idx)
                        break;
        }
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
 
-       if (IS_ERR(mpath) || !mpath)
+       if (!mpath)
                return NULL;
 
        if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
                return ERR_PTR(-ENOMEM);
 
        tbl = sdata->u.mesh.mesh_paths;
+       spin_lock_bh(&tbl->walk_lock);
        do {
                ret = rhashtable_lookup_insert_fast(&tbl->rhead,
                                                    &new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
                        mpath = rhashtable_lookup_fast(&tbl->rhead,
                                                       dst,
                                                       mesh_rht_params);
-
+               else if (!ret)
+                       hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
        } while (unlikely(ret == -EEXIST && !mpath));
+       spin_unlock_bh(&tbl->walk_lock);
 
-       if (ret && ret != -EEXIST)
-               return ERR_PTR(ret);
-
-       /* At this point either new_mpath was added, or we found a
-        * matching entry already in the table; in the latter case
-        * free the unnecessary new entry.
-        */
-       if (ret == -EEXIST) {
+       if (ret) {
                kfree(new_mpath);
+
+               if (ret != -EEXIST)
+                       return ERR_PTR(ret);
+
                new_mpath = mpath;
        }
+
        sdata->u.mesh.mesh_paths_generation++;
        return new_mpath;
 }
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        tbl = sdata->u.mesh.mpp_paths;
+
+       spin_lock_bh(&tbl->walk_lock);
        ret = rhashtable_lookup_insert_fast(&tbl->rhead,
                                            &new_mpath->rhash,
                                            mesh_rht_params);
+       if (!ret)
+               hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+       spin_unlock_bh(&tbl->walk_lock);
+
+       if (ret)
+               kfree(new_mpath);
 
        sdata->u.mesh.mpp_paths_generation++;
        return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
        struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
 
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
                if (rcu_access_pointer(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
                                WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
                }
        }
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       rcu_read_unlock();
 }
 
 static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
 
 static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
+       hlist_del_rcu(&mpath->walk_list);
        rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
        mesh_path_free_rcu(tbl, mpath);
 }
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       struct hlist_node *n;
 
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                if (rcu_access_pointer(mpath->next_hop) == sta)
                        __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 {
        struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       struct hlist_node *n;
 
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                if (ether_addr_equal(mpath->mpp, proxy))
                        __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void table_flush_by_iface(struct mesh_table *tbl)
 {
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
+       struct hlist_node *n;
 
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 /**
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl,
 {
        struct mesh_path *mpath;
 
-       rcu_read_lock();
+       spin_lock_bh(&tbl->walk_lock);
        mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
        if (!mpath) {
-               rcu_read_unlock();
+               spin_unlock_bh(&tbl->walk_lock);
                return -ENXIO;
        }
 
        __mesh_path_del(tbl, mpath);
-       rcu_read_unlock();
+       spin_unlock_bh(&tbl->walk_lock);
        return 0;
 }
 
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
                          struct mesh_table *tbl)
 {
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
+       struct hlist_node *n;
 
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
index 3c36480559f96bd3a54ba9ba715acf51782def0a..2a3d4e27cf3b922570bc203bfb73ba640f51faf4 100644 (file)
@@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 {
        struct ip_vs_dest *dest;
        unsigned int atype, i;
-       int ret = 0;
 
        EnterFunction(2);
 
 #ifdef CONFIG_IP_VS_IPV6
        if (udest->af == AF_INET6) {
+               int ret;
+
                atype = ipv6_addr_type(&udest->addr.in6);
                if ((!(atype & IPV6_ADDR_UNICAST) ||
                        atype & IPV6_ADDR_LINKLOCAL) &&
index de3f1e2acae02a857550660e8de9369a4043a006..e1a88ba2249e5cdc810e7b1f8f569f20257e0047 100644 (file)
@@ -328,6 +328,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
        int err;
 
        list_for_each_entry(rule, &ctx->chain->rules, list) {
+               if (!nft_is_active_next(ctx->net, rule))
+                       continue;
+
                err = nft_delrule(ctx, rule);
                if (err < 0)
                        return err;
index d00a0ef39a56b38cae4114654c44a3bddccb35ba..c96f63ffe31e5fe543b4d033d4342a1e405c6d7b 100644 (file)
@@ -689,8 +689,10 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                rose->source_call = user->call;
                ax25_uid_put(user);
        } else {
-               if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
+               if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
+                       dev_put(dev);
                        return -EACCES;
+               }
                rose->source_call   = *source;
        }
 
index e1981628047b0d022731b59965207fcbe8055213..fbf3519a12d8b9919898743c38a94d0fadca5828 100644 (file)
@@ -48,7 +48,7 @@ struct tcindex_data {
        u32 hash;               /* hash table size; 0 if undefined */
        u32 alloc_hash;         /* allocated size */
        u32 fall_through;       /* 0: only classify if explicit match */
-       struct rcu_head rcu;
+       struct rcu_work rwork;
 };
 
 static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -221,17 +221,11 @@ found:
        return 0;
 }
 
-static int tcindex_destroy_element(struct tcf_proto *tp,
-                                  void *arg, struct tcf_walker *walker)
-{
-       bool last;
-
-       return tcindex_delete(tp, arg, &last, false, NULL);
-}
-
-static void __tcindex_destroy(struct rcu_head *head)
+static void tcindex_destroy_work(struct work_struct *work)
 {
-       struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+       struct tcindex_data *p = container_of(to_rcu_work(work),
+                                             struct tcindex_data,
+                                             rwork);
 
        kfree(p->perfect);
        kfree(p->h);
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
        return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 }
 
-static void __tcindex_partial_destroy(struct rcu_head *head)
+static void tcindex_partial_destroy_work(struct work_struct *work)
 {
-       struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+       struct tcindex_data *p = container_of(to_rcu_work(work),
+                                             struct tcindex_data,
+                                             rwork);
 
        kfree(p->perfect);
        kfree(p);
@@ -480,7 +476,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        }
 
        if (oldp)
-               call_rcu(&oldp->rcu, __tcindex_partial_destroy);
+               tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
        return 0;
 
 errout_alloc:
@@ -564,15 +560,34 @@ static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
                            struct netlink_ext_ack *extack)
 {
        struct tcindex_data *p = rtnl_dereference(tp->root);
-       struct tcf_walker walker;
+       int i;
 
        pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
-       walker.count = 0;
-       walker.skip = 0;
-       walker.fn = tcindex_destroy_element;
-       tcindex_walk(tp, &walker, true);
 
-       call_rcu(&p->rcu, __tcindex_destroy);
+       if (p->perfect) {
+               for (i = 0; i < p->hash; i++) {
+                       struct tcindex_filter_result *r = p->perfect + i;
+
+                       tcf_unbind_filter(tp, &r->res);
+                       if (tcf_exts_get_net(&r->exts))
+                               tcf_queue_work(&r->rwork,
+                                              tcindex_destroy_rexts_work);
+                       else
+                               __tcindex_destroy_rexts(r);
+               }
+       }
+
+       for (i = 0; p->h && i < p->hash; i++) {
+               struct tcindex_filter *f, *next;
+               bool last;
+
+               for (f = rtnl_dereference(p->h[i]); f; f = next) {
+                       next = rtnl_dereference(f->next);
+                       tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
+               }
+       }
+
+       tcf_queue_work(&p->rwork, tcindex_destroy_work);
 }
 
 
index fb6656295204cf6d2fca0451b8ba93a3e5fac057..507105127095a24ca89d49b12118d3e12ce85726 100644 (file)
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
                      unsigned char *cksum, unsigned char *buf)
 {
        struct crypto_sync_skcipher *cipher;
-       unsigned char plain[8];
+       unsigned char *plain;
        s32 code;
 
        dprintk("RPC:       %s:\n", __func__);
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);
 
+       plain = kmalloc(8, GFP_NOFS);
+       if (!plain)
+               return -ENOMEM;
+
        plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
        plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
        plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
 
        code = krb5_encrypt(cipher, cksum, plain, buf, 8);
 out:
+       kfree(plain);
        crypto_free_sync_skcipher(cipher);
        return code;
 }
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
                u32 seqnum,
                unsigned char *cksum, unsigned char *buf)
 {
-       unsigned char plain[8];
+       unsigned char *plain;
+       s32 code;
 
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
                return krb5_make_rc4_seq_num(kctx, direction, seqnum,
                                             cksum, buf);
 
+       plain = kmalloc(8, GFP_NOFS);
+       if (!plain)
+               return -ENOMEM;
+
        plain[0] = (unsigned char) (seqnum & 0xff);
        plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
        plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
        plain[6] = direction;
        plain[7] = direction;
 
-       return krb5_encrypt(key, cksum, plain, buf, 8);
+       code = krb5_encrypt(key, cksum, plain, buf, 8);
+       kfree(plain);
+       return code;
 }
 
 static s32
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
                     unsigned char *buf, int *direction, s32 *seqnum)
 {
        struct crypto_sync_skcipher *cipher;
-       unsigned char plain[8];
+       unsigned char *plain;
        s32 code;
 
        dprintk("RPC:       %s:\n", __func__);
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
        if (code)
                goto out;
 
+       plain = kmalloc(8, GFP_NOFS);
+       if (!plain) {
+               code = -ENOMEM;
+               goto out;
+       }
+
        code = krb5_decrypt(cipher, cksum, buf, plain, 8);
        if (code)
-               goto out;
+               goto out_plain;
 
        if ((plain[4] != plain[5]) || (plain[4] != plain[6])
                                   || (plain[4] != plain[7])) {
                code = (s32)KG_BAD_SEQ;
-               goto out;
+               goto out_plain;
        }
 
        *direction = plain[4];
 
        *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
                                        (plain[2] << 8) | (plain[3]));
+out_plain:
+       kfree(plain);
 out:
        crypto_free_sync_skcipher(cipher);
        return code;
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
               int *direction, u32 *seqnum)
 {
        s32 code;
-       unsigned char plain[8];
+       unsigned char *plain;
        struct crypto_sync_skcipher *key = kctx->seq;
 
        dprintk("RPC:       krb5_get_seq_num:\n");
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
                return krb5_get_rc4_seq_num(kctx, cksum, buf,
                                            direction, seqnum);
+       plain = kmalloc(8, GFP_NOFS);
+       if (!plain)
+               return -ENOMEM;
 
        if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
-               return code;
+               goto out;
 
        if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
-           (plain[4] != plain[7]))
-               return (s32)KG_BAD_SEQ;
+           (plain[4] != plain[7])) {
+               code = (s32)KG_BAD_SEQ;
+               goto out;
+       }
 
        *direction = plain[4];
 
        *seqnum = ((plain[0]) |
                   (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
 
-       return 0;
+out:
+       kfree(plain);
+       return code;
 }
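
All four helpers built the 8-byte sequence-number block in a stack array, which krb5_encrypt()/krb5_decrypt() ultimately hand to the crypto layer through a scatterlist; with CONFIG_VMAP_STACK the stack is vmalloc'ed and may not be physically contiguous, so mapping it via sg_set_buf() is invalid. Heap memory is always safe to map. A minimal sketch of the constraint, with a hypothetical helper:

    /* Sketch: buffers passed to the crypto layer via scatterlists must be
     * addressable by sg_set_buf(); a vmapped stack array is not, so the
     * plaintext block moves to the heap.
     */
    static int build_seq_plaintext(const u8 *src, struct scatterlist *sg)
    {
            u8 *plain = kmalloc(8, GFP_NOFS);       /* heap, not stack */

            if (!plain)
                    return -ENOMEM;
            memcpy(plain, src, 8);
            sg_init_one(sg, plain, 8);      /* legal for lowmem/heap buffers */
            return 0;               /* caller kfree()s after the cipher runs */
    }
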
index 45a033329cd4a7156c41509a255e1ca1d969f2df..19bb356230edf521d78a4729e732ead121daaebd 100644 (file)
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
        rcu_read_lock();
        xprt = rcu_dereference(clnt->cl_xprt);
        /* no "debugfs" dentry? Don't bother with the symlink. */
-       if (!xprt->debugfs) {
+       if (IS_ERR_OR_NULL(xprt->debugfs)) {
                rcu_read_unlock();
                return;
        }
index 4994e75945b848c1dfba523e6a23cb2d45613ef5..21113bfd4ecaf6a3cb79329af81fb40168830d35 100644 (file)
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 
        sendcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_send_wr + 1,
-                            1, IB_POLL_WORKQUEUE);
+                            ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
+                            IB_POLL_WORKQUEUE);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                goto out1;
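
The comp_vector argument of ib_alloc_cq() must name an existing completion vector: valid indices are 0 .. num_comp_vectors - 1, so a hard-coded 1 breaks devices that expose a single vector. The choice, sketched generally:

    /* Sketch: fall back to vector 0 on single-vector devices. */
    static int pick_comp_vector(struct ib_device *dev, int preferred)
    {
            if (preferred < dev->num_comp_vectors)
                    return preferred;
            return 0;
    }
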
index 5ab236c5c9a52136bc597d1c233db54eecf82610..77520eacee8f18da45781ea70cc82ee16e3de94f 100644 (file)
@@ -129,9 +129,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
        return 0;
 
 err_unreg_umem:
-       xdp_clear_umem_at_qid(dev, queue_id);
        if (!force_zc)
                err = 0; /* fallback to copy mode */
+       if (err)
+               xdp_clear_umem_at_qid(dev, queue_id);
 out_rtnl_unlock:
        rtnl_unlock();
        return err;
@@ -265,10 +266,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
        if (!umem->pgs)
                return -ENOMEM;
 
-       down_write(&current->mm->mmap_sem);
-       npgs = get_user_pages(umem->address, umem->npgs,
-                             gup_flags, &umem->pgs[0], NULL);
-       up_write(&current->mm->mmap_sem);
+       down_read(&current->mm->mmap_sem);
+       npgs = get_user_pages_longterm(umem->address, umem->npgs,
+                                      gup_flags, &umem->pgs[0], NULL);
+       up_read(&current->mm->mmap_sem);
 
        if (npgs != umem->npgs) {
                if (npgs >= 0) {
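
get_user_pages_longterm() pins pages for an indefinite lifetime (rejecting FS-DAX mappings that could be truncated under the pin) and only needs mmap_sem held for read, unlike the write-side hold the old code took. A hedged sketch of the calling convention as of this kernel; the helper was later superseded by FOLL_LONGTERM and pin_user_pages():

    /* Sketch: long-term pinning under the read side of mmap_sem. */
    static long pin_umem_pages(unsigned long addr, unsigned long npgs,
                               struct page **pages)
    {
            long got;

            down_read(&current->mm->mmap_sem);
            got = get_user_pages_longterm(addr, npgs, FOLL_WRITE,
                                          pages, NULL);
            up_read(&current->mm->mmap_sem);

            /* on a short pin (got != npgs) the caller must put_page()
             * whatever was returned */
            return got;
    }
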
index 949d3bbccb2f5987cc1840a5a94ee713911f7ebb..41731c9bb26f66ff4aa4fa57357d7df736cd828a 100644 (file)
@@ -669,6 +669,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
                if (!umem)
                        return -EINVAL;
 
+               /* Matches the smp_wmb() in XDP_UMEM_REG */
+               smp_rmb();
                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = READ_ONCE(umem->fq);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
@@ -678,6 +680,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
        if (!q)
                return -EINVAL;
 
+       /* Matches the smp_wmb() in xsk_init_queue */
+       smp_rmb();
        qpg = virt_to_head_page(q->ring);
        if (size > (PAGE_SIZE << compound_order(qpg)))
                return -EINVAL;
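
Both added smp_rmb() calls pair with an smp_wmb() on the publishing side: the writer fully initializes the umem (or queue ring), issues smp_wmb(), and only then stores the pointer; the reader must order its loads the other way around or it can observe the pointer before the contents. A minimal sketch of the pairing, with illustrative names:

    /* Publisher side (e.g. XDP_UMEM_REG / xsk_init_queue), sketched: */
    static void publish(struct xdp_sock *xs, struct xdp_umem *umem)
    {
            init_umem(umem);        /* hypothetical: initialize fully first */
            smp_wmb();              /* order init before the pointer store */
            WRITE_ONCE(xs->umem, umem);
    }

    /* Consumer side (e.g. xsk_mmap), sketched: */
    static struct xdp_umem *consume(struct xdp_sock *xs)
    {
            struct xdp_umem *umem = READ_ONCE(xs->umem);

            if (!umem)
                    return NULL;
            smp_rmb();              /* pairs with the publisher's smp_wmb() */
            return umem;            /* contents are now safe to dereference */
    }
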
index 4ac50ccb3272f12f9860e562b517de628d47e372..47ddfc154036e358d01f32ee9f46250202c2d703 100644 (file)
@@ -753,6 +753,20 @@ TEST_F(tls, control_msg)
        EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
 
        vec.iov_base = buf;
+       EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len);
+
+       cmsg = CMSG_FIRSTHDR(&msg);
+       EXPECT_NE(cmsg, NULL);
+       EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+       EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+       record_type = *((unsigned char *)CMSG_DATA(cmsg));
+       EXPECT_EQ(record_type, 100);
+       EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+       /* Recv the message again without MSG_PEEK */
+       record_type = 0;
+       memset(buf, 0, sizeof(buf));
+
        EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
        cmsg = CMSG_FIRSTHDR(&msg);
        EXPECT_NE(cmsg, NULL);
index 9e350fd3450456a927ac2a0889ccaf2cfe6d6030..9c486fad3f9f8289e2d8989928a34459bb2dd105 100644 (file)
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
                /* Awaken to handle a signal, request we sleep again later. */
                kvm_make_request(KVM_REQ_SLEEP, vcpu);
        }
+
+       /*
+        * Make sure we will observe a potential reset request if we've
+        * observed a change to the power state. Pairs with the smp_wmb() in
+        * kvm_psci_vcpu_on().
+        */
+       smp_rmb();
 }
 
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
                        vcpu_req_sleep(vcpu);
 
+               if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+                       kvm_reset_vcpu(vcpu);
+
                /*
                 * Clear IRQ_PENDING requests that were made to guarantee
                 * that a VCPU sees new virtual interrupts.
index fbdf3ac2f001069f35c92e6ef4c6be878e01221d..30251e2886298ea30489d106712c8fe958f48fb8 100644 (file)
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        vma_pagesize = vma_kernel_pagesize(vma);
        /*
-        * PUD level may not exist for a VM but PMD is guaranteed to
-        * exist.
+        * The stage2 page tables have a minimum of 2 levels (for arm64,
+        * see kvm_arm_setup_stage2()). Hence, we are guaranteed that we
+        * can use PMD_SIZE huge mappings (even when the PMD is folded
+        * into the PGD). As for PUD huge mappings, we must make sure
+        * that we have at least 3 levels, i.e., that the PMD is not folded.
         */
        if ((vma_pagesize == PMD_SIZE ||
-            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) &&
+            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
            !force_pte) {
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
        }
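
The rule being encoded: PMD-sized stage2 huge mappings are always possible because at least two translation levels exist, while PUD-sized mappings additionally require a non-folded PMD, i.e. three levels. Sketched as a predicate using the helper named in the hunk:

    /* Sketch of the level check the hunk corrects. */
    static bool stage2_can_map_size(struct kvm *kvm, unsigned long size)
    {
            if (size == PMD_SIZE)
                    return true;                    /* 2 levels guaranteed */
            if (size == PUD_SIZE)
                    return kvm_stage2_has_pmd(kvm); /* needs 3 levels */
            return size == PAGE_SIZE;
    }
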
index 9b73d3ad918a4520a3a7ecf6897836abc0887f9c..34d08ee637471762529017a8341152468a5bdfd4 100644 (file)
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
+       struct vcpu_reset_state *reset_state;
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL;
-       struct swait_queue_head *wq;
        unsigned long cpu_id;
-       unsigned long context_id;
-       phys_addr_t target_pc;
 
        cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
        if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
                        return PSCI_RET_INVALID_PARAMS;
        }
 
-       target_pc = smccc_get_arg2(source_vcpu);
-       context_id = smccc_get_arg3(source_vcpu);
+       reset_state = &vcpu->arch.reset_state;
 
-       kvm_reset_vcpu(vcpu);
-
-       /* Gracefully handle Thumb2 entry point */
-       if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
-               target_pc &= ~((phys_addr_t) 1);
-               vcpu_set_thumb(vcpu);
-       }
+       reset_state->pc = smccc_get_arg2(source_vcpu);
 
        /* Propagate caller endianness */
-       if (kvm_vcpu_is_be(source_vcpu))
-               kvm_vcpu_set_be(vcpu);
+       reset_state->be = kvm_vcpu_is_be(source_vcpu);
 
-       *vcpu_pc(vcpu) = target_pc;
        /*
         * NOTE: We always update r0 (or x0) because for PSCI v0.1
         * the general purpose registers are undefined upon CPU_ON.
         */
-       smccc_set_retval(vcpu, context_id, 0, 0, 0);
-       vcpu->arch.power_off = false;
-       smp_mb();               /* Make sure the above is visible */
+       reset_state->r0 = smccc_get_arg3(source_vcpu);
+
+       WRITE_ONCE(reset_state->reset, true);
+       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 
-       wq = kvm_arch_vcpu_wq(vcpu);
-       swake_up_one(wq);
+       /*
+        * Make sure the reset request is observed if the change to
+        * power_state is observed.
+        */
+       smp_wmb();
+
+       vcpu->arch.power_off = false;
+       kvm_vcpu_wake_up(vcpu);
 
        return PSCI_RET_SUCCESS;
 }
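
The rewrite makes the target VCPU reset itself: kvm_psci_vcpu_on() only records the requested PC, r0 and endianness in reset_state, raises KVM_REQ_VCPU_RESET, and wakes the target, which performs kvm_reset_vcpu() from its own request loop. The smp_wmb()/smp_rmb() pair guarantees that a VCPU observing power_off == false also observes the pending reset. Sketched in two halves:

    /* Waker (sketch): publish the reset state before the power state. */
    static void wake_with_reset(struct kvm_vcpu *vcpu)
    {
            WRITE_ONCE(vcpu->arch.reset_state.reset, true);
            kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
            smp_wmb();              /* reset state visible before power_off */
            vcpu->arch.power_off = false;
            kvm_vcpu_wake_up(vcpu);
    }

    /* Sleeper (sketch): after seeing the new power state, re-check requests. */
    static void woken_side(struct kvm_vcpu *vcpu)
    {
            /* ... blocked until woken and vcpu->arch.power_off is false ... */
            smp_rmb();              /* pairs with the waker's smp_wmb() */
            if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
                    kvm_reset_vcpu(vcpu);
    }
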
index 07aa900bac56a3aa5d85dadc462225905c2c4f0f..1f62f2b8065de78dfaf30d2ccb157ce42b4ad9b2 100644 (file)
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
                return 0;
        }
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        print_irq_state(s, irq, vcpu);
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        vgic_put_irq(kvm, irq);
        return 0;
index c0c0b88af1d58c0e1c3015424be66272f4873845..3bdb31eaed641d721ba2b3df3b694d291caf2e9a 100644 (file)
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
        struct vgic_dist *dist = &kvm->arch.vgic;
 
        INIT_LIST_HEAD(&dist->lpi_list_head);
-       spin_lock_init(&dist->lpi_list_lock);
+       raw_spin_lock_init(&dist->lpi_list_lock);
 }
 
 /* CREATION */
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 
                irq->intid = i + VGIC_NR_PRIVATE_IRQS;
                INIT_LIST_HEAD(&irq->ap_list);
-               spin_lock_init(&irq->irq_lock);
+               raw_spin_lock_init(&irq->irq_lock);
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu0;
                kref_init(&irq->refcount);
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
        vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
 
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-       spin_lock_init(&vgic_cpu->ap_list_lock);
+       raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 
        /*
         * Enable and configure all SGIs to be edge-triggered and
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
                struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
 
                INIT_LIST_HEAD(&irq->ap_list);
-               spin_lock_init(&irq->irq_lock);
+               raw_spin_lock_init(&irq->irq_lock);
                irq->intid = i;
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu;
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
                        irq->config = VGIC_CONFIG_LEVEL;
                }
 
-               /*
-                * GICv3 can only be created via the KVM_DEVICE_CREATE API and
-                * so we always know the emulation type at this point as it's
-                * either explicitly configured as GICv3, or explicitly
-                * configured as GICv2, or not configured yet which also
-                * implies GICv2.
-                */
                if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
                        irq->group = 1;
                else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
-       int ret = 0, i;
+       int ret = 0, i, idx;
 
        if (vgic_initialized(kvm))
                return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
        if (ret)
                goto out;
 
+       /* Initialize groups on CPUs created before the VGIC type was known */
+       kvm_for_each_vcpu(idx, vcpu, kvm) {
+               struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+               for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+                       struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+                       if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+                               irq->group = 1;
+                       else
+                               irq->group = 0;
+               }
+       }
+
        if (vgic_has_its(kvm)) {
                ret = vgic_v4_init(kvm);
                if (ret)
index eb2a390a6c864393b9cba89eaa544ec1972bda8d..ab3f47745d9caaedf263577c4a7263d6386f70b5 100644 (file)
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 
        INIT_LIST_HEAD(&irq->lpi_list);
        INIT_LIST_HEAD(&irq->ap_list);
-       spin_lock_init(&irq->irq_lock);
+       raw_spin_lock_init(&irq->irq_lock);
 
        irq->config = VGIC_CONFIG_EDGE;
        kref_init(&irq->refcount);
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
        irq->target_vcpu = vcpu;
        irq->group = 1;
 
-       spin_lock_irqsave(&dist->lpi_list_lock, flags);
+       raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
        /*
         * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
        dist->lpi_list_count++;
 
 out_unlock:
-       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+       raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
        /*
         * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
        if (ret)
                return ret;
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
        if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
                irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                }
        }
 
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        if (irq->hw)
                return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
        if (!intids)
                return -ENOMEM;
 
-       spin_lock_irqsave(&dist->lpi_list_lock, flags);
+       raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (i == irq_count)
                        break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
                        continue;
                intids[i++] = irq->intid;
        }
-       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+       raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
        *intid_ptr = intids;
        return i;
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
        int ret = 0;
        unsigned long flags;
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->target_vcpu = vcpu;
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        if (irq->hw) {
                struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
                }
 
                irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = pendmask & (1U << bit_nr);
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
                return irq_set_irqchip_state(irq->host_irq,
                                             IRQCHIP_STATE_PENDING, true);
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->pending_latch = true;
        vgic_queue_irq_unlock(kvm, irq, flags);
 
index 738b65d2d0e76587d24fa0a44f7107c05400cc49..b535fffc740089f0215259cd5080fd6eca3f081b 100644 (file)
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
 
                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;
 
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
 
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;
 
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->source |= (val >> (i * 8)) & 0xff;
 
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
-                       spin_unlock_irqrestore(&irq->irq_lock, flags);
+                       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
index b3d1f098511787766b4fbd3567fb87779f5725b2..4a12322bf7df81215d705eb3f7b5ab825d625fdc 100644 (file)
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
        if (!irq)
                return;
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
        /* We only care about and preserve Aff0, Aff1 and Aff2. */
        irq->mpidr = val & GENMASK(23, 0);
        irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
 
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 }
 
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (test_bit(i, &val)) {
                        /*
                         * pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        irq->pending_latch = false;
-                       spin_unlock_irqrestore(&irq->irq_lock, flags);
+                       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
 
                vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 
                irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                /*
                 * An access targeting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
-                       spin_unlock_irqrestore(&irq->irq_lock, flags);
+                       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
 
                vgic_put_irq(vcpu->kvm, irq);
index ceeda7e04a4d9aa57932adeb1140e9d9348e2a0d..7de42fba05b5c7f11a4f254ca69c346d7e48e5d6 100644 (file)
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->group = !!(val & BIT(i));
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->enabled = false;
 
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                vgic_put_irq(vcpu->kvm, irq);
        }
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;
 
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
        if (irq->hw) {
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
 /*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                vgic_put_irq(vcpu->kvm, irq);
        }
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                        continue;
 
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;
 
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                 * restore irq config before line level.
                 */
                new_level = !!(val & (1U << i));
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
-                       spin_unlock_irqrestore(&irq->irq_lock, flags);
+                       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                vgic_put_irq(vcpu->kvm, irq);
        }
index 69b892abd7dc6faec17e820fe6f4c0f95800ecb3..d91a8938aa7c4eea3c3ee9567be54e5531410e5c 100644 (file)
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
-               spin_lock(&irq->irq_lock);
+               raw_spin_lock(&irq->irq_lock);
 
                /* Always preserve the active bit */
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                                vgic_irq_set_phys_active(irq, false);
                }
 
-               spin_unlock(&irq->irq_lock);
+               raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
index 9c0dd234ebe8112f601e81083de436d5547b5c55..4ee0aeb9a9058165bce669c92217cbe73fce1fe6 100644 (file)
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;
 
-               spin_lock(&irq->irq_lock);
+               raw_spin_lock(&irq->irq_lock);
 
                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                                vgic_irq_set_phys_active(irq, false);
                }
 
-               spin_unlock(&irq->irq_lock);
+               raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
@@ -347,9 +347,9 @@ retry:
 
        status = val & (1 << bit_nr);
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->target_vcpu != vcpu) {
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                goto retry;
        }
        irq->pending_latch = status;
index 870b1185173b610c9d7fdd42363efe6fdee08868..abd9c735267784a3085b797d33133a579dc629c6 100644 (file)
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * When taking more than one ap_list_lock at the same time, always take the
  * lowest numbered VCPU's ap_list_lock first, so:
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  *
  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
  * spinlocks for any lock that may be taken while injecting an interrupt.
  */
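
When two ap_list_locks must be held at once, as in vgic_prune_ap_list() below when an interrupt migrates between VCPUs, deadlock is avoided by always taking the lower vcpu_id's lock first and annotating the inner acquisition for lockdep. The ordering rule, condensed:

    /* Sketch: acquire two VCPUs' ap_list_locks in vcpu_id order. */
    static void lock_both(struct kvm_vcpu *vcpu, struct kvm_vcpu *target)
    {
            struct kvm_vcpu *vcpuA, *vcpuB;

            if (vcpu->vcpu_id < target->vcpu_id) {
                    vcpuA = vcpu;           /* lowest vcpu_id locks first */
                    vcpuB = target;
            } else {
                    vcpuA = target;
                    vcpuB = vcpu;
            }

            raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
            raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
    }
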
 
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
        struct vgic_irq *irq = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&dist->lpi_list_lock, flags);
+       raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
        irq = NULL;
 
 out_unlock:
-       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+       raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
        return irq;
 }
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
        if (irq->intid < VGIC_MIN_LPI)
                return;
 
-       spin_lock_irqsave(&dist->lpi_list_lock, flags);
+       raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        if (!kref_put(&irq->refcount, vgic_irq_release)) {
-               spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+               raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
                return;
        };
 
        list_del(&irq->lpi_list);
        dist->lpi_list_count--;
-       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+       raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
        kfree(irq);
 }
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
        bool penda, pendb;
        int ret;
 
-       spin_lock(&irqa->irq_lock);
-       spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+       raw_spin_lock(&irqa->irq_lock);
+       raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 
        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
 out:
-       spin_unlock(&irqb->irq_lock);
-       spin_unlock(&irqa->irq_lock);
+       raw_spin_unlock(&irqb->irq_lock);
+       raw_spin_unlock(&irqa->irq_lock);
        return ret;
 }
 
@@ -325,7 +325,7 @@ retry:
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                /*
                 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        /* someone can do stuff here, which we re-check below */
 
-       spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
-       spin_lock(&irq->irq_lock);
+       raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+       raw_spin_lock(&irq->irq_lock);
 
        /*
         * Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
         */
 
        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
-               spin_unlock(&irq->irq_lock);
-               spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+               raw_spin_unlock(&irq->irq_lock);
+               raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+                                          flags);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }
 
@@ -382,8 +383,8 @@ retry:
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;
 
-       spin_unlock(&irq->irq_lock);
-       spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+       raw_spin_unlock(&irq->irq_lock);
+       raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
        if (!irq)
                return -EINVAL;
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 
        BUG_ON(!irq);
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
        return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
        if (!irq->hw)
                goto out;
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->active = false;
        irq->pending_latch = false;
        irq->line_level = false;
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
        vgic_put_irq(vcpu->kvm, irq);
 }
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        BUG_ON(!irq);
 
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        kvm_vgic_unmap_irq(irq);
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
        return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
                return -EINVAL;
 
        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        return ret;
 }
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-       spin_lock(&vgic_cpu->ap_list_lock);
+       raw_spin_lock(&vgic_cpu->ap_list_lock);
 
        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
                bool target_vcpu_needs_kick = false;
 
-               spin_lock(&irq->irq_lock);
+               raw_spin_lock(&irq->irq_lock);
 
                BUG_ON(vcpu != irq->vcpu);
 
@@ -616,7 +617,7 @@ retry:
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
-                       spin_unlock(&irq->irq_lock);
+                       raw_spin_unlock(&irq->irq_lock);
 
                        /*
                         * This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:
 
                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
-                       spin_unlock(&irq->irq_lock);
+                       raw_spin_unlock(&irq->irq_lock);
                        continue;
                }
 
                /* This interrupt looks like it has to be migrated. */
 
-               spin_unlock(&irq->irq_lock);
-               spin_unlock(&vgic_cpu->ap_list_lock);
+               raw_spin_unlock(&irq->irq_lock);
+               raw_spin_unlock(&vgic_cpu->ap_list_lock);
 
                /*
                 * Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
                        vcpuB = vcpu;
                }
 
-               spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
-               spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
-                                SINGLE_DEPTH_NESTING);
-               spin_lock(&irq->irq_lock);
+               raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+               raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+                                     SINGLE_DEPTH_NESTING);
+               raw_spin_lock(&irq->irq_lock);
 
                /*
                 * If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
                        target_vcpu_needs_kick = true;
                }
 
-               spin_unlock(&irq->irq_lock);
-               spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-               spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+               raw_spin_unlock(&irq->irq_lock);
+               raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+               raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
                if (target_vcpu_needs_kick) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
                goto retry;
        }
 
-       spin_unlock(&vgic_cpu->ap_list_lock);
+       raw_spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                int w;
 
-               spin_lock(&irq->irq_lock);
+               raw_spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                w = vgic_irq_get_lr_count(irq);
-               spin_unlock(&irq->irq_lock);
+               raw_spin_unlock(&irq->irq_lock);
 
                count += w;
                *multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
        count = 0;
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-               spin_lock(&irq->irq_lock);
+               raw_spin_lock(&irq->irq_lock);
 
                /*
                 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
                 * the AP list has been sorted already.
                 */
                if (multi_sgi && irq->priority > prio) {
-                       spin_unlock(&irq->irq_lock);
+                       raw_spin_unlock(&irq->irq_lock);
                        break;
                }
 
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
                                prio = irq->priority;
                }
 
-               spin_unlock(&irq->irq_lock);
+               raw_spin_unlock(&irq->irq_lock);
 
                if (count == kvm_vgic_global_state.nr_lr) {
                        if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-       spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
-       spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
        if (can_access_vgic_from_kernel())
                vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
        vgic_get_vmcr(vcpu, &vmcr);
 
-       spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+       raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-               spin_lock(&irq->irq_lock);
+               raw_spin_lock(&irq->irq_lock);
                pending = irq_is_pending(irq) && irq->enabled &&
                          !irq->active &&
                          irq->priority < vmcr.pmr;
-               spin_unlock(&irq->irq_lock);
+               raw_spin_unlock(&irq->irq_lock);
 
                if (pending)
                        break;
        }
 
-       spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+       raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
        return pending;
 }
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
                return false;
 
        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
-       spin_lock_irqsave(&irq->irq_lock, flags);
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
-       spin_unlock_irqrestore(&irq->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
        return map_is_active;
 }
-