Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 16 Jan 2015 19:01:21 +0000 (08:01 +1300)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 16 Jan 2015 19:01:21 +0000 (08:01 +1300)
Pull arm64 fixes from Will Deacon:
 - Wire up compat_sys_execveat for compat (AArch32) tasks
 - Revert 421520ba9829, as this breaks our side of the boot protocol

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: partially revert "ARM: 8167/1: extend the reserved memory for initrd to be page aligned"
  arm64: compat: wire up compat_sys_execveat
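
For context, wiring a compat syscall on arm64 is a two-line, mechanical change to the
AArch32 syscall table in arch/arm64/include/asm/unistd32.h. A minimal sketch, with the
syscall number shown for illustration only:

	/* arch/arm64/include/asm/unistd32.h -- sketch; number illustrative */
	#define __NR_execveat 387
	__SYSCALL(__NR_execveat, compat_sys_execveat)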

138 files changed:
Documentation/networking/ip-sysctl.txt
Documentation/target/tcm_mod_builder.py
Documentation/thermal/cpu-cooling-api.txt
MAINTAINERS
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/vf610-twr.dts
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/hyp.S
arch/arm64/kvm/reset.c
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/powerpc/include/asm/thread_info.h
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/s390/hypfs/hypfs_vm.c
arch/s390/include/asm/irqflags.h
arch/s390/include/asm/timex.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/syscalls.S
arch/s390/kernel/uprobes.c
arch/s390/kernel/vtime.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit_comp.c
arch/x86/kernel/kprobes/core.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/time.c
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h
block/blk-timeout.c
drivers/acpi/int340x_thermal.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/virtio_blk.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-grgpio.c
drivers/isdn/hardware/eicon/message.c
drivers/leds/leds-netxbig.c
drivers/misc/cxl/context.c
drivers/misc/cxl/file.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/at91_ether.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/team/team.c
drivers/net/usb/kaweth.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/xen-netfront.c
drivers/s390/crypto/ap_bus.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_user.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/acpi_thermal_rel.c
drivers/thermal/int340x_thermal/processor_thermal_device.c
drivers/thermal/of-thermal.c
drivers/thermal/rcar_thermal.c
drivers/thermal/thermal_core.h
drivers/vhost/scsi.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/simplefb.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/lockd/svc.c
fs/locks.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
include/asm-generic/tlb.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/compiler.h
include/linux/mmc/sdhci.h
include/linux/netdevice.h
include/linux/nfs_fs_sb.h
include/target/target_core_backend.h
include/target/target_core_backend_configfs.h
include/target/target_core_base.h
include/uapi/linux/openvswitch.h
include/xen/interface/nmi.h [new file with mode: 0644]
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events.c
mm/memory.c
net/bridge/br_input.c
net/core/neighbour.c
net/ipv4/netfilter/nft_redir_ipv4.c
net/ipv6/netfilter/nft_redir_ipv6.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_nat.c
net/openvswitch/datapath.c
net/packet/af_packet.c
net/tipc/bcast.c
tools/testing/selftests/exec/execveat.c
tools/testing/selftests/mqueue/mq_perf_tests.c
tools/testing/selftests/vm/Makefile

index 9bffdfc648dc66149401296d73eb6fc04564ebd1..85b0221791048aabb65bcee8090562384ab1c628 100644 (file)
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
+       From Linux 3.6 onwards, this is deprecated for IPv4 as the
+       route cache is no longer used.
 
 neigh/default/gc_thresh1 - INTEGER
        Minimum number of entries to keep.  Garbage collector will not
index 230ce71f4d75529ff4e071ed25e7bf0b4f72cd3c..2b47704f75cb3bfedf836cf02c75afd82c91e405 100755 (executable)
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .release_cmd                    = " + fabric_mod_name + "_release_cmd,\n"
        buf += "        .shutdown_session               = " + fabric_mod_name + "_shutdown_session,\n"
        buf += "        .close_session                  = " + fabric_mod_name + "_close_session,\n"
-       buf += "        .stop_session                   = " + fabric_mod_name + "_stop_session,\n"
-       buf += "        .fall_back_to_erl0              = " + fabric_mod_name + "_reset_nexus,\n"
-       buf += "        .sess_logged_in                 = " + fabric_mod_name + "_sess_logged_in,\n"
        buf += "        .sess_get_index                 = " + fabric_mod_name + "_sess_get_index,\n"
        buf += "        .sess_get_initiator_sid         = NULL,\n"
        buf += "        .write_pending                  = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .queue_data_in                  = " + fabric_mod_name + "_queue_data_in,\n"
        buf += "        .queue_status                   = " + fabric_mod_name + "_queue_status,\n"
        buf += "        .queue_tm_rsp                   = " + fabric_mod_name + "_queue_tm_rsp,\n"
-       buf += "        .is_state_remove                = " + fabric_mod_name + "_is_state_remove,\n"
+       buf += "        .aborted_task                   = " + fabric_mod_name + "_aborted_task,\n"
        buf += "        /*\n"
        buf += "         * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
        buf += "         */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        /*\n"
        buf += "         * Register the top level struct config_item_type with TCM core\n"
        buf += "         */\n"
-       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
        buf += "        if (IS_ERR(fabric)) {\n"
        buf += "                printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
        buf += "                return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                if re.search('get_fabric_name', fo):
                        buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
                        buf += "{\n"
-                       buf += "        return \"" + fabric_mod_name[4:] + "\";\n"
+                       buf += "        return \"" + fabric_mod_name + "\";\n"
                        buf += "}\n\n"
                        bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
                        continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
 
-               if re.search('stop_session\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
-               if re.search('fall_back_to_erl0\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
-               if re.search('sess_logged_in\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return 0;\n"
-                       buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
                if re.search('sess_get_index\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
                        buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
 
                if re.search('queue_tm_rsp\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+                       buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+                       bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
 
-               if re.search('is_state_remove\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+               if re.search('aborted_task\)\(', fo):
+                       buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+                       bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
 
        ret = p.write(buf)
        if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
        tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
        tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
 
index fca24c931ec8dcb737012b6b67f6b88a8fef2223..753e47cc2e2036cd53e176241f579addd3b43ec1 100644 (file)
@@ -3,7 +3,7 @@ CPU cooling APIs How To
 
 Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
 
-Updated: 12 May 2012
+Updated: 6 Jan 2015
 
 Copyright (c)  2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 
@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.
 
    clip_cpus: cpumask of cpus where the frequency constraints will happen.
 
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+       struct device_node *np, const struct cpumask *clip_cpus)
+
+    This interface function registers the cpufreq cooling device with
+    the name "thermal-cpufreq-%x" linking it with a device tree node, in
+    order to bind it via the thermal DT code. This API can support multiple
+    instances of cpufreq cooling devices.
+
+    np: pointer to the cooling device device tree node
+    clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
     This interface function unregisters the "thermal-cpufreq-%x" cooling device.
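
A driver binding a CPU cooling device through DT might use the pair like this (a minimal
sketch; error handling is elided and the choice of cpu_present_mask is an assumption):

	struct thermal_cooling_device *cdev;

	cdev = of_cpufreq_cooling_register(np, cpu_present_mask);	/* np: this device's DT node */
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);
	/* ... on remove ... */
	cpufreq_cooling_unregister(cdev);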
 
index 3589d67437f867e121bc36b0ff2b92e758499c04..ab6610e6c610baae31aff65a9e1e09a2e866c38f 100644 (file)
@@ -4749,7 +4749,7 @@ S:        Supported
 F:     drivers/scsi/ipr.*
 
 IBM Power Virtual Ethernet Device Driver
-M:     Santiago Leon <santil@linux.vnet.ibm.com>
+M:     Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmveth.*
@@ -5280,6 +5280,15 @@ W:       www.open-iscsi.org
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/ulp/iser/
 
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M:     Sagi Grimberg <sagig@mellanox.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L:     linux-rdma@vger.kernel.org
+L:     target-devel@vger.kernel.org
+S:     Supported
+W:     http://www.linux-iscsi.org
+F:     drivers/infiniband/ulp/isert
+
 ISDN SUBSYSTEM
 M:     Karsten Keil <isdn@linux-pingi.de>
 L:     isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -9534,7 +9543,8 @@ F:        drivers/platform/x86/thinkpad_acpi.c
 TI BANDGAP AND THERMAL DRIVER
 M:     Eduardo Valentin <edubezval@gmail.com>
 L:     linux-pm@vger.kernel.org
-S:     Supported
+L:     linux-omap@vger.kernel.org
+S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
 TI CLOCK DRIVER
index 1e6e5cc1c14cf283fb8b3bd9321f44218fbe4fb3..8c1febd7e3f2757176d1ba0ab14450133010d177 100644 (file)
        pinctrl-0 = <&pinctrl_enet1>;
        phy-supply = <&reg_enet_3v3>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy1>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy1: ethernet-phy@0 {
+                       reg = <0>;
+               };
+
+               ethphy2: ethernet-phy@1 {
+                       reg = <1>;
+               };
+       };
 };
 
 &fec2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet2>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy2>;
        status = "okay";
 };
 
index a0f762159cb26501b517a1af0d529da8abc42283..f2b64b1b00fa5231fc2493164fcc89f434687cd1 100644 (file)
 
 &fec0 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy0>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec0>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy0: ethernet-phy@0 {
+                       reg = <0>;
+               };
+
+               ethphy1: ethernet-phy@1 {
+                       reg = <1>;
+               };
+       };
 };
 
 &fec1 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy1>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec1>;
        status = "okay";
index 8127e45e263752821c833d1c354a8033372b2a47..865a7e28ea2d166efc0f27911970fd480bce4c3d 100644 (file)
@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+               vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
index fbe909fb0a1a8b95ab4f6e3ade19daaa21c70436..c3ca89c27c6b351839ec62f763ca99d34ac5b3c2 100644 (file)
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
+       lsr     x1, x1, #12
        tlbi    ipas2e1is, x1
        /*
         * We have to ensure completion of the invalidation at Stage-2,
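
The added lsr matters because the tlbi ipas2e1is operand encoding takes the IPA in
page-number form (address bits [51:12] in the low bits of the register), not as a byte
address. In C terms, roughly:

	/* sketch: TLBI by IPA wants a page number, hence the shift by 12 */
	unsigned long tlbi_arg = ipa >> 12;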
index 70a7816535cd4a9bf575b9767a9a9fd62dbe21e6..0b43265789858cbe71f761eebbc48927834b7fe8 100644 (file)
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                        if (!cpu_has_32bit_el1())
                                return -EINVAL;
                        cpu_reset = &default_regs_reset32;
-                       vcpu->arch.hcr_el2 &= ~HCR_RW;
                } else {
                        cpu_reset = &default_regs_reset;
                }
index 75e75d7b1702fb6434c59e9155dbba1d71623b17..244e0dbe45dbeda359e233cde23b4652f0ce13dc 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            355
+#define NR_syscalls            356
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 2c1bec9a14b67da42a8ed09b644373d0cf35b5ef..61fb6cb9d2ae3c66a1c0c6dec1ac95adb83dd810 100644 (file)
 #define __NR_getrandom         352
 #define __NR_memfd_create      353
 #define __NR_bpf               354
+#define __NR_execveat          355
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 2ca219e184cd16e6ebad1bd9123061695691c843..a0ec4303f2c8e57a04fb353178d43b0be6a461fe 100644 (file)
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
        .long sys_getrandom
        .long sys_memfd_create
        .long sys_bpf
+       .long sys_execveat              /* 355 */
 
index ebc4f165690a9fc0113864b6857708b5df8c317a..0be6c681cab1341061c02031464d5355ff8a4d7d 100644 (file)
@@ -23,9 +23,9 @@
 #define THREAD_SIZE            (1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp)  clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
 #else
-#define CURRENT_THREAD_INFO(dest, sp)  rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER      (THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
 static inline struct thread_info *current_thread_info(void)
 {
-       /* gcc4, at least, is smart enough to turn this into a single
-        * rlwinm for ppc32 and clrrdi for ppc64 */
-       return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+       unsigned long val;
+
+       asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+       return (struct thread_info *)val;
 }
 
 #endif /* __ASSEMBLY__ */
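
The underlying trick is unchanged: thread_info sits at the base of the THREAD_SIZE-aligned
kernel stack, so masking the stack pointer yields its address. The patch merely computes it
with the same asm the assembly callers use, instead of trusting gcc's handling of the r1
register variable. In plain C the idea is roughly:

	/* sketch: thread_info lives at the bottom of the aligned kernel stack */
	struct thread_info *ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));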
index 54eca8b3b288f8fcca45ef5f44c5211832b3416f..0509bca5e830b656c8a553c047740b68b172f4b1 100644 (file)
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION;                                            \
        b       1f;                                             \
 END_FTR_SECTION(0, 1);                                         \
        ld      r12,opal_tracepoint_refcount@toc(r2);           \
-       std     r12,32(r1);                                     \
        cmpdi   r12,0;                                          \
        bne-    LABEL;                                          \
 1:
index 32040ace00ea2431a18428dca5c34c0c4ebde10c..afbe07907c10b6304e52b5eb234d33694fd9693a 100644 (file)
@@ -231,7 +231,7 @@ failed:
 struct dbfs_d2fc_hdr {
        u64     len;            /* Length of d2fc buffer without header */
        u16     version;        /* Version of header */
-       char    tod_ext[16];    /* TOD clock for d2fc */
+       char    tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
        u64     count;          /* Number of VM guests in d2fc buffer */
        char    reserved[30];
 } __attribute__ ((packed));
index 37b9091ab8c010c88a22bb2851f0133b10c6f032..16aa0c779e0762e210fe6652a0a5efda24771381 100644 (file)
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-       return __arch_local_irq_stosm(0x00);
+       return __arch_local_irq_stnsm(0xff);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
index 8beee1cceba4ed17831736a11c6f5167155335d6..98eb2a5792234d9d12c303bdb1301f869f706b60 100644 (file)
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
        set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-#define CLOCK_TICK_RATE        1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE                1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE   16      /* stcke writes 16 bytes */
 
 typedef unsigned long long cycles_t;
 
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
 {
-       typedef struct { char _[sizeof(clk)]; } addrtype;
+       typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
 
        asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
 }
 
 static inline unsigned long long get_tod_clock(void)
 {
-       unsigned char clk[16];
+       unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
        get_tod_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
 }
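
The bug being fixed is a classic C pitfall: an array function parameter decays to a
pointer, so the old sizeof(clk) inside get_tod_clock_ext() evaluated to sizeof(char *),
not 16. A standalone illustration:

	#include <stdio.h>

	static void takes_array(char clk[16])
	{
		/* clk has decayed to char *, so this prints 8 on 64-bit, not 16 */
		printf("%zu\n", sizeof(clk));
	}

	int main(void)
	{
		char buf[16];

		printf("%zu\n", sizeof(buf));	/* 16 */
		takes_array(buf);		/* 8 */
		return 0;
	}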
index 2b446cf0cc65543d38defaf1985d4246f767449b..67878af257a083c531140f74b951019e2a1537e0 100644 (file)
 #define __NR_bpf               351
 #define __NR_s390_pci_mmio_write       352
 #define __NR_s390_pci_mmio_read                353
-#define NR_syscalls 354
+#define __NR_execveat          354
+#define NR_syscalls 355
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index a2987243bc76c89bd07a6d4a298825bea9664ab8..939ec474b1dd705e7814f94c94df4faeb8009aa2 100644 (file)
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
 SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
index f6b3cd056ec22c1c28b908c4cefc46bc4a4e2099..cc7328080b609a653b72c1ca2c6989f054e3be74 100644 (file)
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
        return false;
 }
 
+static int check_per_event(unsigned short cause, unsigned long control,
+                          struct pt_regs *regs)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return 0;
+       /* user space single step */
+       if (control == 0)
+               return 1;
+       /* over indication for storage alteration */
+       if ((control & 0x20200000) && (cause & 0x2000))
+               return 1;
+       if (cause & 0x8000) {
+               /* all branches */
+               if ((control & 0x80800000) == 0x80000000)
+                       return 1;
+               /* branch into selected range */
+               if (((control & 0x80800000) == 0x80800000) &&
+                   regs->psw.addr >= current->thread.per_user.start &&
+                   regs->psw.addr <= current->thread.per_user.end)
+                       return 1;
+       }
+       return 0;
+}
+
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
        int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
                if (regs->psw.addr - utask->xol_vaddr == ilen)
                        regs->psw.addr = utask->vaddr + ilen;
        }
-       /* If per tracing was active generate trap */
-       if (regs->psw.mask & PSW_MASK_PER)
-               do_per_trap(regs);
+       if (check_per_event(current->thread.per_event.cause,
+                           current->thread.per_user.control, regs)) {
+               /* fix per address */
+               current->thread.per_event.address = utask->vaddr;
+               /* trigger per event */
+               set_pt_regs_flag(regs, PIF_PER_TRAP);
+       }
        return 0;
 }
 
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
        clear_thread_flag(TIF_UPROBE_SINGLESTEP);
        regs->int_code = auprobe->saved_int_code;
        regs->psw.addr = current->utask->vaddr;
+       current->thread.per_event.address = current->utask->vaddr;
 }
 
 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
        __rc;                                           \
 })
 
-#define emu_store_ril(ptr, input)                      \
+#define emu_store_ril(regs, ptr, input)                        \
 ({                                                     \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
+       __typeof__(ptr) __ptr = (ptr);                  \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
-       else if ((u64 __force)ptr & mask)               \
+       else if ((u64 __force)__ptr & mask)             \
                __rc = EMU_SPECIFICATION;               \
-       else if (put_user(*(input), ptr))               \
+       else if (put_user(*(input), __ptr))             \
                __rc = EMU_ADDRESSING;                  \
+       if (__rc == 0)                                  \
+               sim_stor_event(regs, __ptr, mask + 1);  \
        __rc;                                           \
 })
 
@@ -197,6 +229,25 @@ union split_register {
        s16 s16[4];
 };
 
+/*
+ * If user per registers are setup to trace storage alterations and an
+ * emulated store took place on a fitting address a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return;
+       if (!(current->thread.per_user.control & PER_EVENT_STORE))
+               return;
+       if ((void *)current->thread.per_user.start > (addr + len))
+               return;
+       if ((void *)current->thread.per_user.end < addr)
+               return;
+       current->thread.per_event.address = regs->psw.addr;
+       current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+       set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
 /*
  * pc relative instructions are emulated, since parameters may not be
  * accessible from the xol area due to range limitations.
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* sthrl */
-                       rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+                       rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
                        break;
                case 0x0b: /* stgrl */
-                       rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+                       rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* strl */
-                       rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+                       rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
index 7f0089d9a4aa47ef86e7691502281c140ee637c3..e34122e539a16bad4e5727f881f982e4829cd8f0 100644 (file)
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
index be99357d238c68e34dee133c52053e23c88a34d4..3cf8cc03fff60d7a59e7b23f4e92d477061a5f42 100644 (file)
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
 {
        struct page *page;
-       unsigned long offset;
+       unsigned long offset, mask;
 
        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-       page = pmd_to_page((pmd_t *) entry);
+       mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+       page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
 }
 
index c52ac77408ca5cac7ad50b6a2a8a3113d8af30ca..524496d47ef506d0ca888356df21fd4bb7e25053 100644 (file)
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
                EMIT4_DISP(0x88500000, K);
                break;
        case BPF_ALU | BPF_NEG: /* A = -A */
-               /* lnr %r5,%r5 */
-               EMIT2(0x1155);
+               /* lcr %r5,%r5 */
+               EMIT2(0x1355);
                break;
        case BPF_JMP | BPF_JA: /* ip += K */
                offset = addrs[i + K] + jit->start - jit->prg;
@@ -502,8 +502,8 @@ branch:             if (filter->jt == filter->jf) {
 xbranch:       /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
                        jit->seen |= SEEN_XREG;
-                       /* cr %r5,%r12 */
-                       EMIT2(0x195c);
+                       /* clr %r5,%r12 */
+                       EMIT2(0x155c);
                }
                goto branch;
        case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
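
Both JIT fixes swap in the semantically correct s390 instruction: LNR loads the negative
of the absolute value, while BPF_NEG needs a true two's-complement negate (LCR), and BPF
conditional jumps compare unsigned values, so the signed CR must become the logical CLR.
What the two negate opcodes compute, expressed in C for illustration:

	int lcr(int a) { return -a; }			/* load complement: correct A = -A */
	int lnr(int a) { return a < 0 ? a : -a; }	/* load negative: -|a|, wrong for BPF_NEG */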
index f7e3cd50ece02a7b0408683d113bbafca8d49479..98f654d466e585167153e58811902675bfeb5baa 100644 (file)
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        regs->flags &= ~X86_EFLAGS_IF;
        trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
+
+       /*
+        * jprobes use jprobe_return(), which skips the normal return
+        * path of the function and messes up the accounting of the
+        * function graph tracer.
+        *
+        * Pause function graph tracing while performing the jprobe function.
+        */
+       pause_graph_tracing();
        return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        u8 *addr = (u8 *) (regs->ip - 1);
        struct jprobe *jp = container_of(p, struct jprobe, kp);
+       void *saved_sp = kcb->jprobe_saved_sp;
 
        if ((addr > (u8 *) jprobe_return) &&
            (addr < (u8 *) jprobe_return_end)) {
-               if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+               if (stack_addr(regs) != saved_sp) {
                        struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
                        printk(KERN_ERR
                               "current sp %p does not match saved sp %p\n",
-                              stack_addr(regs), kcb->jprobe_saved_sp);
+                              stack_addr(regs), saved_sp);
                        printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
                        show_regs(saved_regs);
                        printk(KERN_ERR "Current registers\n");
                        show_regs(regs);
                        BUG();
                }
+               /* It's OK to start function graph tracing again */
+               unpause_graph_tracing();
                *regs = kcb->jprobe_saved_regs;
-               memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
-                      kcb->jprobes_stack,
-                      MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+               memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
                preempt_enable_no_resched();
                return 1;
        }
index 6bf3a13e3e0f7af10c8d984f829a512661d12c2e..78a881b7fc415e16f50f16e4381e968370f4fd9f 100644 (file)
@@ -40,6 +40,7 @@
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
 #include <xen/interface/xen-mca.h>
 #include <xen/features.h>
 #include <xen/page.h>
@@ -66,6 +67,7 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
        .emergency_restart = xen_emergency_restart,
 };
 
+static unsigned char xen_get_nmi_reason(void)
+{
+       unsigned char reason = 0;
+
+       /* Construct a value which looks like it came from port 0x61. */
+       if (test_bit(_XEN_NMIREASON_io_error,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_IOCHK;
+       if (test_bit(_XEN_NMIREASON_pci_serr,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_SERR;
+
+       return reason;
+}
+
 static void __init xen_boot_params_init_edd(void)
 {
 #if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
        pv_apic_ops = xen_apic_ops;
-       if (!xen_pvh_domain())
+       if (!xen_pvh_domain()) {
                pv_cpu_ops = xen_cpu_ops;
 
+               x86_platform.get_nmi_reason = xen_get_nmi_reason;
+       }
+
        if (xen_feature(XENFEAT_auto_translated_physmap))
                x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
        else
index edbc7a63fd737f0ca13edac752e1f06341f9dd97..70fb5075c901f5b0c370478b156288764f609ad8 100644 (file)
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
 }
 
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
 {
-       BUG_ON(!slab_is_available());
+       if (unlikely(!slab_is_available())) {
+               free_bootmem((unsigned long)p, PAGE_SIZE);
+               return;
+       }
+
        free_page((unsigned long)p);
 }
 
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
                        p2m_missing_pte : p2m_identity_pte;
                for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                        pmdp = populate_extra_pmd(
-                               (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+                               (unsigned long)(p2m + pfn) + i * PMD_SIZE);
                        set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
                }
        }
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
  * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
  */
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
        pte_t *ptechk;
-       pte_t *pteret = ptep;
        pte_t *pte_newpg[PMDS_PER_MID_PAGE];
        pmd_t *pmdp;
        unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                if (ptechk == pte_pg) {
                        set_pmd(pmdp,
                                __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
-                       if (vaddr == (addr & ~(PMD_SIZE - 1)))
-                               pteret = pte_offset_kernel(pmdp, addr);
                        pte_newpg[i] = NULL;
                }
 
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                vaddr += PMD_SIZE;
        }
 
-       return pteret;
+       return lookup_address(addr, &level);
 }
 
 /*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
 
        if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
                /* PMD level is missing, allocate a new one */
-               ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+               ptep = alloc_p2m_pmd(addr, pte_pg);
                if (!ptep)
                        return false;
        }
index dfd77dec8e2b7c1ef72d16adb1ecf7f7b80c88de..865e56cea7a0abe4d9b6feb2e1d0da27957d47e8 100644 (file)
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
        int i;
-       unsigned long addr = PFN_PHYS(pfn);
+       phys_addr_t addr = PFN_PHYS(pfn);
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
        int i;
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               if (!xen_extra_mem[i].size)
+                       continue;
                pfn_s = PFN_DOWN(xen_extra_mem[i].start);
                pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
                for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-       unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
-       unsigned long *released)
+       unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
 {
-       unsigned long len = 0;
        unsigned long pfn, end;
        int ret;
 
        WARN_ON(start_pfn > end_pfn);
 
+       /* Release pages first. */
        end = min(end_pfn, nr_pages);
        for (pfn = start_pfn; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
                if (ret == 1) {
+                       (*released)++;
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
-                       len++;
                } else
                        break;
        }
 
-       /* Need to release pages first */
-       *released += len;
-       *identity += set_phys_range_identity(start_pfn, end_pfn);
+       set_phys_range_identity(start_pfn, end_pfn);
 }
 
 /*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
        }
 
        /* Update kernel mapping, but not for highmem. */
-       if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+       if (pfn >= PFN_UP(__pa(high_memory - 1)))
                return;
 
        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long ident_pfn_iter, remap_pfn_iter;
        unsigned long ident_end_pfn = start_pfn + size;
        unsigned long left = size;
-       unsigned long ident_cnt = 0;
        unsigned int i, chunk;
 
        WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
                xen_remap_mfn = mfn;
 
                /* Set identity map */
-               ident_cnt += set_phys_range_identity(ident_pfn_iter,
-                       ident_pfn_iter + chunk);
+               set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
 
                left -= chunk;
        }
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 static unsigned long __init xen_set_identity_and_remap_chunk(
         const struct e820entry *list, size_t map_size, unsigned long start_pfn,
        unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-       unsigned long *identity, unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        unsigned long pfn;
        unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Do not remap pages beyond the current allocation */
                if (cur_pfn >= nr_pages) {
                        /* Identity map remaining pages */
-                       *identity += set_phys_range_identity(cur_pfn,
-                               cur_pfn + size);
+                       set_phys_range_identity(cur_pfn, cur_pfn + size);
                        break;
                }
                if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                if (!remap_range_size) {
                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
-                               cur_pfn + left, nr_pages, identity, released);
+                               cur_pfn + left, nr_pages, released);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
-               *identity += size;
+               *remapped += size;
        }
 
        /*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 
 static void __init xen_set_identity_and_remap(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-       unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        phys_addr_t start = 0;
-       unsigned long identity = 0;
        unsigned long last_pfn = nr_pages;
        const struct e820entry *entry;
        unsigned long num_released = 0;
+       unsigned long num_remapped = 0;
        int i;
 
        /*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
                                last_pfn = xen_set_identity_and_remap_chunk(
                                                list, map_size, start_pfn,
                                                end_pfn, nr_pages, last_pfn,
-                                               &identity, &num_released);
+                                               &num_released, &num_remapped);
                        start = end;
                }
        }
 
        *released = num_released;
+       *remapped = num_remapped;
 
-       pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
        pr_info("Released %ld page(s)\n", num_released);
 }
 
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
+       unsigned long remapped_pages;
        int i;
        int op;
 
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
         * underlying RAM.
         */
        xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-                                  &xen_released_pages);
+                                  &xen_released_pages, &remapped_pages);
 
        extra_pages += xen_released_pages;
+       extra_pages += remapped_pages;
 
        /*
         * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
index f473d268d387fcdc8f237153b378508ec0c03f56..69087341d9aed7860dfd95549c0a8af5ffbf1ed6 100644 (file)
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
 
 struct xen_clock_event_device {
        struct clock_event_device evt;
-       char *name;
+       char name[16];
 };
 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
        if (evt->irq >= 0) {
                unbind_from_irqhandler(evt->irq, NULL);
                evt->irq = -1;
-               kfree(per_cpu(xen_clock_events, cpu).name);
-               per_cpu(xen_clock_events, cpu).name = NULL;
        }
 }
 
 void xen_setup_timer(int cpu)
 {
-       char *name;
-       struct clock_event_device *evt;
+       struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+       struct clock_event_device *evt = &xevt->evt;
        int irq;
 
-       evt = &per_cpu(xen_clock_events, cpu).evt;
        WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
        if (evt->irq >= 0)
                xen_teardown_timer(cpu);
 
        printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
-       name = kasprintf(GFP_KERNEL, "timer%d", cpu);
-       if (!name)
-               name = "<timer kasprintf failed>";
+       snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
 
        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
                                      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
-                                     name, NULL);
+                                     xevt->name, NULL);
        (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 
        memcpy(evt, xen_clockevent, sizeof(*evt));
 
        evt->cpumask = cpumask_of(cpu);
        evt->irq = irq;
-       per_cpu(xen_clock_events, cpu).name = name;
 }
 
 
 void xen_setup_cpu_clockevents(void)
 {
-       BUG_ON(preemptible());
-
        clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
 }
 
index 30f6153a40c27c7154dd453301807c7d39d1451c..3ad405571dcc5105a52da4284477a187db936f64 100644 (file)
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+       if (q->mq_ops)
+               blk_mq_wake_waiters(q);
+       else {
+               struct request_list *rl;
+
+               blk_queue_for_each_rl(rl, q) {
+                       if (rl->rq_pool) {
+                               wake_up(&rl->wait[BLK_RW_SYNC]);
+                               wake_up(&rl->wait[BLK_RW_ASYNC]);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
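
Exporting blk_set_queue_dying() lets a driver mark a queue dying before tearing it down,
so tasks blocked on request allocation or blk-mq tags are woken instead of sleeping
forever. A hypothetical removal path might look like this sketch:

	blk_set_queue_dying(q);		/* wake tag and request-list waiters */
	blk_mq_cancel_requeue_work(q);	/* stop requeue processing */
	blk_cleanup_queue(q);		/* drain and free the queue */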
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       blk_set_queue_dying(q);
        spin_lock_irq(lock);
 
        /*
index 32e8dbb9ad1c49f0078e57fae100f0e6a7eb8a73..60c9d4a93fe470ced7471cd8653d8a00fc8922d7 100644 (file)
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
                wake_index = bt_index_inc(wake_index);
        }
+
+       if (include_reserve) {
+               bt = &tags->breserved_tags;
+               if (waitqueue_active(&bt->bs[0].wait))
+                       wake_up(&bt->bs[0].wait);
+       }
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
        atomic_dec(&tags->active_queues);
 
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
         * static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
        return 0;
 }
 
index 6206ed17ef766714b655a715ffbc05fd34b0463a..a6fa0fc9d41a2e91c8bb4ce29bb2b1a0d952c8ed 100644 (file)
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
index da1ab5641227b670faac42a84fde7e223668a4d8..2f95747c287eac350b45cc272fcd3d6e9c43ee09 100644 (file)
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        bool freeze;
 
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
                blk_mq_run_queues(q, false);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
        blk_mq_freeze_queue_wait(q);
 }
 
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        bool wake;
 
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
                wake_up_all(&q->mq_freeze_wq);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       queue_for_each_hw_ctx(q, hctx, i)
+               if (blk_mq_hw_queue_mapped(hctx))
+                       blk_mq_tag_wakeup_all(hctx->tags, true);
+
+       /*
+        * If we are called because the queue has now been marked as
+        * dying, we need to ensure that processes currently waiting on
+        * the queue are notified as well.
+        */
+       wake_up_all(&q->mq_freeze_wq);
+}
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
-       if (!rq)
+       if (!rq) {
+               blk_mq_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
+       }
        return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+int blk_mq_request_started(struct request *rq)
+{
+       return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
 void blk_mq_start_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+       cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
        kblockd_schedule_work(&q->requeue_work);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+       unsigned long flags;
+       LIST_HEAD(rq_list);
+
+       spin_lock_irqsave(&q->requeue_lock, flags);
+       list_splice_init(&q->requeue_list, &rq_list);
+       spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+       while (!list_empty(&rq_list)) {
+               struct request *rq;
+
+               rq = list_first_entry(&rq_list, struct request, queuelist);
+               list_del_init(&rq->queuelist);
+               rq->errors = -EIO;
+               blk_mq_end_request(rq, rq->errors);
+       }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
 static inline bool is_flush_request(struct request *rq,
                struct blk_flush_queue *fq, unsigned int tag)
 {
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
                break;
        }
 }
-               
+
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
 {
        struct blk_mq_timeout_data *data = priv;
 
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+               /*
+                * If a request wasn't started before the queue was
+                * marked dying, kill it here or it'll go unnoticed.
+                */
+               if (unlikely(blk_queue_dying(rq->q))) {
+                       rq->errors = -EIO;
+                       blk_mq_complete_request(rq);
+               }
+               return;
+       }
+       if (rq->cmd_flags & REQ_NO_TIMEOUT)
                return;
 
        if (time_after_eq(jiffies, rq->deadline)) {
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
        hctx->flags = set->flags;
-       hctx->cmd_size = set->cmd_size;
 
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
index 206230e64f7915e642ce7306aec8b949deca43b1..4f4f943c22c3d1e907ef2c18224b8635f0057e82 100644 (file)
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
                struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
index 56c025894cdf2d73f78c5346c9c5987d5deb0e37..246dfb16c3d988c4f84749065a66977b825c98b5 100644 (file)
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
        struct request_queue *q = req->q;
        unsigned long expiry;
 
+       if (req->cmd_flags & REQ_NO_TIMEOUT)
+               return;
+
        /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
        if (!q->mq_ops && !q->rq_timed_out_fn)
                return;
index a27d31d1ba24afcd176d1151aff62da8350e54b4..9dcf83682e367e889db67cd5ae44670878893459 100644 (file)
 
 #include "internal.h"
 
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0X01
 static const struct acpi_device_id int340x_thermal_device_ids[] = {
-       {"INT3400", DO_ENUMERATION },
-       {"INT3401"},
+       {"INT3400"},
+       {"INT3401", INT3401_DEVICE},
        {"INT3402"},
        {"INT3403"},
        {"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
                                        const struct acpi_device_id *id)
 {
 #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
-       if (id->driver_data == DO_ENUMERATION)
+       acpi_create_platform_device(adev);
+#elif defined(CONFIG_INTEL_SOC_DTS_THERMAL) || defined(CONFIG_INTEL_SOC_DTS_THERMAL_MODULE)
+       /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+       if (id->driver_data == INT3401_DEVICE)
                acpi_create_platform_device(adev);
 #endif
        return 1;
index ae9f615382f6173c9ad6377c4bc4275403575fdf..aa2224aa7caa34d5854aebfb7ceaf4cebd29eccc 100644
@@ -530,7 +530,7 @@ static int null_add_dev(void)
                        goto out_cleanup_queues;
 
                nullb->q = blk_mq_init_queue(&nullb->tag_set);
-               if (!nullb->q) {
+               if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
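
blk_mq_init_queue() now reports failure as an ERR_PTR() value instead of
NULL, so every caller in this series (null_blk here, the NVMe admin queue
and virtio_blk below) switches from a NULL check to IS_ERR(). The hunks
keep a fixed -ENOMEM; a variant that preserves the encoded errno would
look like this sketch:

    q = blk_mq_init_queue(&tag_set);
    if (IS_ERR(q)) {
            rv = PTR_ERR(q);    /* propagate the real error code */
            goto out_cleanup_tags;
    }
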
index b1d5d87973157b4c6e4a70b757519c37b3460201..cb529e9a82dd685b5b372bea2ed272c59fae5bc5 100644
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
        cmd->fn = handler;
        cmd->ctx = ctx;
        cmd->aborted = 0;
+       blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
 }
 
 /* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        if (unlikely(status)) {
                if (!(status & NVME_SC_DNR || blk_noretry_request(req))
                    && (jiffies - req->start_time) < req->timeout) {
+                       unsigned long flags;
+
                        blk_mq_requeue_request(req);
-                       blk_mq_kick_requeue_list(req->q);
+                       spin_lock_irqsave(req->q->queue_lock, flags);
+                       if (!blk_queue_stopped(req->q))
+                               blk_mq_kick_requeue_list(req->q);
+                       spin_unlock_irqrestore(req->q->queue_lock, flags);
                        return;
                }
                req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                }
        }
 
-       blk_mq_start_request(req);
-
        nvme_set_info(cmd, iod, req_completion);
        spin_lock_irq(&nvmeq->q_lock);
        if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       req->cmd_flags |= REQ_NO_TIMEOUT;
        cmd_info = blk_mq_rq_to_pdu(req);
        nvme_set_info(cmd_info, req, async_req_completion);
 
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
        struct nvme_command cmd;
 
        if (!nvmeq->qid || cmd_rq->aborted) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dev_list_lock, flags);
                if (work_busy(&dev->reset_work))
-                       return;
+                       goto out;
                list_del_init(&dev->node);
                dev_warn(&dev->pci_dev->dev,
                        "I/O %d QID %d timeout, reset controller\n",
                                                        req->tag, nvmeq->qid);
                dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
+ out:
+               spin_unlock_irqrestore(&dev_list_lock, flags);
                return;
        }
 
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
        void *ctx;
        nvme_completion_fn fn;
        struct nvme_cmd_info *cmd;
-       static struct nvme_completion cqe = {
-               .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
-       };
+       struct nvme_completion cqe;
+
+       if (!blk_mq_request_started(req))
+               return;
 
        cmd = blk_mq_rq_to_pdu(req);
 
        if (cmd->ctx == CMD_CTX_CANCELLED)
                return;
 
+       if (blk_queue_dying(req->q))
+               cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+       else
+               cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
        dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
                                                req->tag, nvmeq->qid);
        ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
-       if (nvmeq->dev->initialized)
-               nvme_abort_req(req);
-
        /*
         * The aborted req will be completed on receiving the abort req.
         * We enable the timer again. If hit twice, it'll cause a device reset,
         * as the device then is in a faulty state.
         */
-       return BLK_EH_RESET_TIMER;
+       int ret = BLK_EH_RESET_TIMER;
+
+       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+                                                       nvmeq->qid);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       if (!nvmeq->dev->initialized) {
+               /*
+                * Force-cancelling the command frees the request, so we must
+                * return BLK_EH_NOT_HANDLED.
+                */
+               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+               ret = BLK_EH_NOT_HANDLED;
+       } else
+               nvme_abort_req(req);
+       spin_unlock_irq(&nvmeq->q_lock);
+
+       return ret;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-       int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+       int vector;
 
        spin_lock_irq(&nvmeq->q_lock);
+       if (nvmeq->cq_vector == -1) {
+               spin_unlock_irq(&nvmeq->q_lock);
+               return 1;
+       }
+       vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
        nvmeq->dev->online_queues--;
+       nvmeq->cq_vector = -1;
        spin_unlock_irq(&nvmeq->q_lock);
 
        irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
                adapter_delete_sq(dev, qid);
                adapter_delete_cq(dev, qid);
        }
+       if (!qid && dev->admin_q)
+               blk_mq_freeze_queue_start(dev->admin_q);
        nvme_clear_queue(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-                                                       int depth, int vector)
+                                                       int depth)
 {
        struct device *dmadev = &dev->pci_dev->dev;
        struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
-       nvmeq->cq_vector = vector;
        nvmeq->qid = qid;
        dev->queue_count++;
        dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        struct nvme_dev *dev = nvmeq->dev;
        int result;
 
+       nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
                return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
        .timeout        = nvme_timeout,
 };
 
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+       if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+               blk_cleanup_queue(dev->admin_q);
+               blk_mq_free_tag_set(&dev->admin_tagset);
+       }
+}
+
 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 {
        if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
                        return -ENOMEM;
 
                dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
-               if (!dev->admin_q) {
+               if (IS_ERR(dev->admin_q)) {
                        blk_mq_free_tag_set(&dev->admin_tagset);
                        return -ENOMEM;
                }
-       }
+               if (!blk_get_queue(dev->admin_q)) {
+                       nvme_dev_remove_admin(dev);
+                       return -ENODEV;
+               }
+       } else
+               blk_mq_unfreeze_queue(dev->admin_q);
 
        return 0;
 }
 
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
-       if (dev->admin_q)
-               blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
        int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
        nvmeq = dev->queues[0];
        if (!nvmeq) {
-               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
                if (!nvmeq)
                        return -ENOMEM;
        }
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                goto free_nvmeq;
 
-       result = nvme_alloc_admin_tags(dev);
-       if (result)
-               goto free_nvmeq;
-
+       nvmeq->cq_vector = 0;
        result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result)
-               goto free_tags;
+               goto free_nvmeq;
 
        return result;
 
- free_tags:
-       nvme_free_admin_tags(dev);
  free_nvmeq:
        nvme_free_queues(dev, 0);
        return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
        unsigned i;
 
        for (i = dev->queue_count; i <= dev->max_qid; i++)
-               if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+               if (!nvme_alloc_queue(dev, i, dev->q_depth))
                        break;
 
        for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
                        break;
                if (!schedule_timeout(ADMIN_TIMEOUT) ||
                                        fatal_signal_pending(current)) {
+                       /*
+                        * Disable the controller first since we can't trust it
+                        * at this point, but leave the admin queue enabled
+                        * until all queue deletion requests are flushed.
+                        * FIXME: This may take a while if there are more h/w
+                        * queues than admin tags.
+                        */
                        set_current_state(TASK_RUNNING);
-
                        nvme_disable_ctrl(dev, readq(&dev->bar->cap));
-                       nvme_disable_queue(dev, 0);
-
-                       send_sig(SIGKILL, dq->worker->task, 1);
+                       nvme_clear_queue(dev->queues[0]);
                        flush_kthread_worker(dq->worker);
+                       nvme_disable_queue(dev, 0);
                        return;
                }
        }
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
 {
        struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
                                                        cmdinfo.work);
-       allow_signal(SIGKILL);
        if (nvme_delete_sq(nvmeq))
                nvme_del_queue_end(nvmeq);
 }
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
                kthread_stop(tmp);
 }
 
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               blk_mq_freeze_queue_start(ns->queue);
+
+               spin_lock(ns->queue->queue_lock);
+               queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+               spin_unlock(ns->queue->queue_lock);
+
+               blk_mq_cancel_requeue_work(ns->queue);
+               blk_mq_stop_hw_queues(ns->queue);
+       }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+               blk_mq_unfreeze_queue(ns->queue);
+               blk_mq_start_stopped_hw_queues(ns->queue, true);
+               blk_mq_kick_requeue_list(ns->queue);
+       }
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
        int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
-       if (dev->bar)
+       if (dev->bar) {
+               nvme_freeze_queues(dev);
                csts = readl(&dev->bar->csts);
+       }
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        nvme_dev_unmap(dev);
 }
 
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
-       if (dev->admin_q && !blk_queue_dying(dev->admin_q))
-               blk_cleanup_queue(dev->admin_q);
-}
-
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
        struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
        list_for_each_entry(ns, &dev->namespaces, list) {
                if (ns->disk->flags & GENHD_FL_UP)
                        del_gendisk(ns->disk);
-               if (!blk_queue_dying(ns->queue))
+               if (!blk_queue_dying(ns->queue)) {
+                       blk_mq_abort_requeue_list(ns->queue);
                        blk_cleanup_queue(ns->queue);
+               }
        }
 }
 
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        blk_mq_free_tag_set(&dev->tagset);
+       blk_put_queue(dev->admin_q);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
        }
 
        nvme_init_queue(dev->queues[0], 0);
+       result = nvme_alloc_admin_tags(dev);
+       if (result)
+               goto disable;
 
        result = nvme_setup_io_queues(dev);
        if (result)
-               goto disable;
+               goto free_tags;
 
        nvme_set_irq_hints(dev);
 
        return result;
 
+ free_tags:
+       nvme_dev_remove_admin(dev);
  disable:
        nvme_disable_queue(dev, 0);
        nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                dev->reset_workfn = nvme_remove_disks;
                queue_work(nvme_workq, &dev->reset_work);
                spin_unlock(&dev_list_lock);
+       } else {
+               nvme_unfreeze_queues(dev);
+               nvme_set_irq_hints(dev);
        }
        dev->initialized = 1;
        return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->reset_work);
        misc_deregister(&dev->miscdev);
-       nvme_dev_remove(dev);
        nvme_dev_shutdown(dev);
+       nvme_dev_remove(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
-       nvme_free_admin_tags(dev);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
 }
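
nvme_freeze_queues()/nvme_unfreeze_queues() above combine three mechanisms
so that nothing moves while the controller is down: a blk-mq freeze, the
legacy QUEUE_FLAG_STOPPED flag (checked before kicking the requeue list),
and stopping the hardware contexts plus the requeue worker. For a single
queue the pairing reduces to this sketch, using only calls that appear in
the hunks above:

    /* quiesce before touching the hardware */
    blk_mq_freeze_queue_start(q);           /* block new submitters */
    spin_lock(q->queue_lock);
    queue_flag_set(QUEUE_FLAG_STOPPED, q);
    spin_unlock(q->queue_lock);
    blk_mq_cancel_requeue_work(q);
    blk_mq_stop_hw_queues(q);

    /* ...reset or reinitialize the controller... */

    /* resume in the opposite order */
    queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
    blk_mq_unfreeze_queue(q);
    blk_mq_start_stopped_hw_queues(q, true);
    blk_mq_kick_requeue_list(q);
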
index 7ef7c098708fc4e482181724d574555bbb9db6d7..cdfbd21e35975178fa0c4cece78a354ef1d53007 100644
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_put_disk;
 
        q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
-       if (!q) {
+       if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
index 978b51eae2ec61bbba18db37cf05b8e93db2037e..ce3c1558cb0a6f6cfa5a818736da54ff8fc5ed07 100644
 
 #define DLN2_GPIO_MAX_PINS 32
 
-struct dln2_irq_work {
-       struct work_struct work;
-       struct dln2_gpio *dln2;
-       int pin;
-       int type;
-};
-
 struct dln2_gpio {
        struct platform_device *pdev;
        struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
         */
        DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
 
-       DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
-       struct dln2_irq_work *irq_work;
+       /* active IRQs - not synced to hardware */
+       DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+       /* active IRQs - synced to hardware */
+       DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+       int irq_type[DLN2_GPIO_MAX_PINS];
+       struct mutex irq_lock;
 };
 
 struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
        return !!ret;
 }
 
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
-                                     unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+                                    unsigned int pin, int value)
 {
        struct dln2_gpio_pin_val req = {
                .pin = cpu_to_le16(pin),
                .value = value,
        };
 
-       dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
-                        sizeof(req));
+       return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+                               sizeof(req));
 }
 
 #define DLN2_GPIO_DIRECTION_IN         0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                      int value)
 {
+       struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+       int ret;
+
+       ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+       if (ret < 0)
+               return ret;
+
        return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
 }
 
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
                                &req, sizeof(req));
 }
 
-static void dln2_irq_work(struct work_struct *w)
-{
-       struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
-       struct dln2_gpio *dln2 = iw->dln2;
-       u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
-       if (test_bit(iw->pin, dln2->irqs_enabled))
-               dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
-       else
-               dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       int pin = irqd_to_hwirq(irqd);
-
-       set_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       clear_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
+       set_bit(pin, dln2->unmasked_irqs);
 }
 
 static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       struct device *dev = dln2->gpio.dev;
-       int pin = irqd_to_hwirq(irqd);
-
-       if (test_and_clear_bit(pin, dln2->irqs_pending)) {
-               int irq;
-
-               irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
-               if (!irq) {
-                       dev_err(dev, "pin %d not mapped to IRQ\n", pin);
-                       return;
-               }
-
-               generic_handle_irq(irq);
-       }
+       clear_bit(pin, dln2->unmasked_irqs);
 }
 
 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
 
        switch (type) {
        case IRQ_TYPE_LEVEL_HIGH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
                break;
        case IRQ_TYPE_LEVEL_LOW:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
                break;
        case IRQ_TYPE_EDGE_BOTH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
                break;
        case IRQ_TYPE_EDGE_RISING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
                break;
        case IRQ_TYPE_EDGE_FALLING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
                break;
        default:
                return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
        return 0;
 }
 
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+       mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+       int pin = irqd_to_hwirq(irqd);
+       int enabled, unmasked;
+       unsigned type;
+       int ret;
+
+       enabled = test_bit(pin, dln2->enabled_irqs);
+       unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+       if (enabled != unmasked) {
+               if (unmasked) {
+                       type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+                       set_bit(pin, dln2->enabled_irqs);
+               } else {
+                       type = DLN2_GPIO_EVENT_NONE;
+                       clear_bit(pin, dln2->enabled_irqs);
+               }
+
+               ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+               if (ret)
+                       dev_err(dln2->gpio.dev, "failed to set event\n");
+       }
+
+       mutex_unlock(&dln2->irq_lock);
+}
+
 static struct irq_chip dln2_gpio_irqchip = {
        .name = "dln2-irq",
-       .irq_enable = dln2_irq_enable,
-       .irq_disable = dln2_irq_disable,
        .irq_mask = dln2_irq_mask,
        .irq_unmask = dln2_irq_unmask,
        .irq_set_type = dln2_irq_set_type,
+       .irq_bus_lock = dln2_irq_bus_lock,
+       .irq_bus_sync_unlock = dln2_irq_bus_unlock,
 };
 
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                return;
        }
 
-       if (!test_bit(pin, dln2->irqs_enabled))
-               return;
-       if (test_bit(pin, dln2->irqs_masked)) {
-               set_bit(pin, dln2->irqs_pending);
-               return;
-       }
-
-       switch (dln2->irq_work[pin].type) {
+       switch (dln2->irq_type[pin]) {
        case DLN2_GPIO_EVENT_CHANGE_RISING:
                if (event->value)
                        generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        struct dln2_gpio *dln2;
        struct device *dev = &pdev->dev;
        int pins;
-       int i, ret;
+       int ret;
 
        pins = dln2_gpio_get_pin_count(pdev);
        if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        if (!dln2)
                return -ENOMEM;
 
-       dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
-                                     sizeof(struct dln2_irq_work), GFP_KERNEL);
-       if (!dln2->irq_work)
-               return -ENOMEM;
-       for (i = 0; i < pins; i++) {
-               INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
-               dln2->irq_work[i].pin = i;
-               dln2->irq_work[i].dln2 = dln2;
-       }
+       mutex_init(&dln2->irq_lock);
 
        dln2->pdev = pdev;
 
@@ -529,11 +510,8 @@ out:
 static int dln2_gpio_remove(struct platform_device *pdev)
 {
        struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
-       int i;
 
        dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
-       for (i = 0; i < dln2->gpio.ngpio; i++)
-               flush_work(&dln2->irq_work[i].work);
        gpiochip_remove(&dln2->gpio);
 
        return 0;
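
The dln2 rework replaces per-pin workqueues with the generic slow-bus IRQ
protocol: irq_mask()/irq_unmask() only flip an in-memory bitmap, since they
run in atomic context, and the one USB transfer that syncs the hardware
happens in irq_bus_sync_unlock(), where sleeping is allowed. A skeleton of
that pattern, with a hypothetical my_hw_sync() standing in for the bus
transfer:

    #include <linux/bitops.h>
    #include <linux/irq.h>
    #include <linux/mutex.h>

    struct my_chip {
            struct mutex irq_lock;
            DECLARE_BITMAP(unmasked, 32);
            DECLARE_BITMAP(enabled, 32);
    };

    static void my_hw_sync(struct my_chip *c, int pin); /* hypothetical, may sleep */

    static void my_irq_mask(struct irq_data *d)
    {
            struct my_chip *c = irq_data_get_irq_chip_data(d);

            clear_bit(irqd_to_hwirq(d), c->unmasked);   /* no I/O here */
    }

    static void my_irq_bus_lock(struct irq_data *d)
    {
            struct my_chip *c = irq_data_get_irq_chip_data(d);

            mutex_lock(&c->irq_lock);
    }

    static void my_irq_bus_sync_unlock(struct irq_data *d)
    {
            struct my_chip *c = irq_data_get_irq_chip_data(d);
            int pin = irqd_to_hwirq(d);

            /* Push cached mask state out in one slow transaction. */
            if (test_bit(pin, c->unmasked) != test_bit(pin, c->enabled)) {
                    change_bit(pin, c->enabled);
                    my_hw_sync(c, pin);
            }
            mutex_unlock(&c->irq_lock);
    }
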
index 09daaf2aeb563d982c71807b8155df4b2c50a6c5..3a5a71050559c7c52964a5b55764ef9b16e82361 100644
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
        err = gpiochip_add(gc);
        if (err) {
                dev_err(&ofdev->dev, "Could not add gpiochip\n");
-               irq_domain_remove(priv->domain);
+               if (priv->domain)
+                       irq_domain_remove(priv->domain);
                return err;
        }
 
index a82e542ffc21dd4dccf9d1b810ab9373984c1d0d..0b380603a578543bcd275d248ade94f3f07b24a8 100644
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
        byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
        byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
        byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
-       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
        byte force_mt_info = false;
        byte dir;
        dword d;
index 26515c27ea8cca18a2e5f5ac24f82a6158194182..25e419752a7b7c5719baa69bd114d57720a62f92 100644
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
        led_dat->sata = 0;
        led_dat->cdev.brightness = LED_OFF;
        led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
-       /*
-        * If available, expose the SATA activity blink capability through
-        * a "sata" sysfs attribute.
-        */
-       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
-               led_dat->cdev.groups = netxbig_led_groups;
        led_dat->mode_addr = template->mode_addr;
        led_dat->mode_val = template->mode_val;
        led_dat->bright_addr = template->bright_addr;
        led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
        led_dat->timer = pdata->timer;
        led_dat->num_timer = pdata->num_timer;
+       /*
+        * If available, expose the SATA activity blink capability through
+        * a "sata" sysfs attribute.
+        */
+       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+               led_dat->cdev.groups = netxbig_led_groups;
 
        return led_classdev_register(&pdev->dev, &led_dat->cdev);
 }
index 51fd6b524371ecd9ae762716e4cf33692c36ce45..d1b55fe62817dcd0261ab926a704a37f590ca67c 100644
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
        return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct cxl_context *ctx = vma->vm_file->private_data;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+       u64 area, offset;
+
+       offset = vmf->pgoff << PAGE_SHIFT;
+
+       pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+                       __func__, ctx->pe, address, offset);
+
+       if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+               area = ctx->afu->psn_phys;
+               if (offset > ctx->afu->adapter->ps_size)
+                       return VM_FAULT_SIGBUS;
+       } else {
+               area = ctx->psn_phys;
+               if (offset > ctx->psn_size)
+                       return VM_FAULT_SIGBUS;
+       }
+
+       mutex_lock(&ctx->status_mutex);
+
+       if (ctx->status != STARTED) {
+               mutex_unlock(&ctx->status_mutex);
+               pr_devel("%s: Context not started, failing problem state access\n", __func__);
+               return VM_FAULT_SIGBUS;
+       }
+
+       vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+       mutex_unlock(&ctx->status_mutex);
+
+       return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+       .fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
        u64 len = vma->vm_end - vma->vm_start;
        len = min(len, ctx->psn_size);
 
-       if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-       }
+       if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+               /* make sure there is a valid per process space for this AFU */
+               if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+                       pr_devel("AFU doesn't support mmio space\n");
+                       return -EINVAL;
+               }
 
-       /* make sure there is a valid per process space for this AFU */
-       if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-               pr_devel("AFU doesn't support mmio space\n");
-               return -EINVAL;
+               /* Can't mmap until the AFU is enabled */
+               if (!ctx->afu->enabled)
+                       return -EBUSY;
        }
 
-       /* Can't mmap until the AFU is enabled */
-       if (!ctx->afu->enabled)
-               return -EBUSY;
-
        pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
                 ctx->psn_phys, ctx->pe , ctx->master);
 
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       return vm_iomap_memory(vma, ctx->psn_phys, len);
+       vma->vm_ops = &cxl_mmap_vmops;
+       return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
        afu_release_irqs(ctx);
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
        wake_up_all(&ctx->wq);
-
-       /* Release Problem State Area mapping */
-       mutex_lock(&ctx->mapping_lock);
-       if (ctx->mapping)
-               unmap_mapping_range(ctx->mapping, 0, 0, 1);
-       mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
                 * created and torn down after the IDR removed
                 */
                __detach_context(ctx);
+
+               /*
+                * We are force detaching - remove any active PSA mappings so
+                * userspace cannot interfere with the card if it comes back.
+                * Easiest way to exercise this is to unbind and rebind the
+                * driver via sysfs while it is in use.
+                */
+               mutex_lock(&ctx->mapping_lock);
+               if (ctx->mapping)
+                       unmap_mapping_range(ctx->mapping, 0, 0, 1);
+               mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
 }
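
Switching the cxl mmap path from vm_iomap_memory() to a .fault handler is
what allows the driver to check the context state on every first touch of
a page, so a stopped context gets SIGBUS instead of a stale mapping, and
unmap_mapping_range() can later revoke everything, as the force-detach
path above now does. The shape of the pattern, with illustrative (non-cxl)
my_ctx/phys_base names:

    struct my_ctx {
            bool started;
            u64 phys_base;      /* physical base of the mmio window */
    };

    static int my_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct my_ctx *ctx = vma->vm_file->private_data;
            unsigned long addr = (unsigned long)vmf->virtual_address;
            u64 offset = vmf->pgoff << PAGE_SHIFT;

            if (!ctx->started)      /* state re-checked on every fault */
                    return VM_FAULT_SIGBUS;

            vm_insert_pfn(vma, addr, (ctx->phys_base + offset) >> PAGE_SHIFT);
            return VM_FAULT_NOPAGE;
    }

    static const struct vm_operations_struct my_vmops = {
            .fault = my_mmap_fault,
    };

    /* in ->mmap(): vm_insert_pfn() requires these flags */
    vma->vm_flags |= VM_IO | VM_PFNMAP;
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    vma->vm_ops = &my_vmops;
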
index e9f2f10dbb3734f3de4df60cdaed583415cfd831..b15d8113877c9f6ed5c45c58fb012fa6f50276aa 100644
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        pr_devel("%s: pe: %i\n", __func__, ctx->pe);
 
-       mutex_lock(&ctx->status_mutex);
-       if (ctx->status != OPENED) {
-               rc = -EIO;
-               goto out;
-       }
-
+       /* Do this outside the status_mutex to avoid a circular dependency with
+        * the locking in cxl_mmap_fault() */
        if (copy_from_user(&work, uwork,
                           sizeof(struct cxl_ioctl_start_work))) {
                rc = -EFAULT;
                goto out;
        }
 
+       mutex_lock(&ctx->status_mutex);
+       if (ctx->status != OPENED) {
+               rc = -EIO;
+               goto out;
+       }
+
        /*
         * if any of the reserved fields are set or any of the unused
         * flags are set it's invalid
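
The reordering in this ioctl encodes a general rule: never touch user
memory while holding a lock the fault path may need. copy_from_user() can
fault on a buffer that is itself backed by this driver's mmap, which
re-enters cxl_mmap_fault() and tries to take status_mutex. Reduced to a
sketch:

    /* copy first -- a fault here may recurse into ->fault() */
    if (copy_from_user(&work, uwork, sizeof(work)))
            return -EFAULT;

    mutex_lock(&ctx->status_mutex);     /* also taken by the fault path */
    /* ...validate and consume 'work' under the lock... */
    mutex_unlock(&ctx->status_mutex);
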
index e3e56d35f0eeee634a0930acce0500bb33cb3258..970314e0aac8f1f05e5b9ff199e28723b9d85a19 100644
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
        { "INT33BB"  , "3" , &sdhci_acpi_slot_int_sd },
        { "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio },
+       { "INT344D"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "PNP0D40"  },
        { },
 };
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
        { "INT33BB"  },
        { "INT33C6"  },
        { "INT3436"  },
+       { "INT344D"  },
        { "PNP0D40"  },
        { },
 };
index 03427755b9029b297393d1d43851b4f678f2bbae..4f38554ce6797e0a461a3ddaf76261264229f107 100644
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
                .subdevice      = PCI_ANY_ID,
                .driver_data    = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
        },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
index d57c3d169914e94e716b64b90e8b8da2b86afa87..1ec684d06d54733b35b2427d4007bfd5cfd3d52b 100644
@@ -21,6 +21,9 @@
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC0  0x08e5
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1  0x08e6
 #define PCI_DEVICE_ID_INTEL_QRK_SD     0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC   0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO   0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD     0x9d2d
 
 /*
  * PCI registers
index 45238871192da1d6553268d82659dce3cc600d68..ca3424e7ef717c59a82da1e13a8ca3cf939da7f2 100644
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (IS_ERR(host))
                return PTR_ERR(host);
 
-       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
-               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
-               if (ret < 0)
-                       goto err_mbus_win;
-       }
-
-
        pltfm_host = sdhci_priv(host);
        pltfm_host->priv = pxa;
 
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (!IS_ERR(pxa->clk_core))
                clk_prepare_enable(pxa->clk_core);
 
+       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+               if (ret < 0)
+                       goto err_mbus_win;
+       }
+
        /* enable 1/8V DDR capable */
        host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
@@ -396,11 +395,11 @@ err_add_host:
        pm_runtime_disable(&pdev->dev);
 err_of_parse:
 err_cd_req:
+err_mbus_win:
        clk_disable_unprepare(pxa->clk_io);
        if (!IS_ERR(pxa->clk_core))
                clk_disable_unprepare(pxa->clk_core);
 err_clk_get:
-err_mbus_win:
        sdhci_pltfm_free(pdev);
        return ret;
 }
index cbb245b5853873cbddc2f1a0967c8bcc989ea4e1..f1a488ee432f891971f79707d78ca91a631b6c51 100644
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
 
                del_timer_sync(&host->tuning_timer);
                host->flags &= ~SDHCI_NEEDS_RETUNING;
-               host->mmc->max_blk_count =
-                       (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
        }
        sdhci_enable_card_detection(host);
 }
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                spin_unlock_irq(&host->lock);
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
                spin_lock_irq(&host->lock);
+
+               if (mode != MMC_POWER_OFF)
+                       sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+               else
+                       sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
                return;
        }
 
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_runtime_pm_get(host);
 
+       present = mmc_gpio_get_cd(host->mmc);
+
        spin_lock_irqsave(&host->lock, flags);
 
        WARN_ON(host->mrq != NULL);
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
         *     zero: cd-gpio is used, and card is removed
         *     one: cd-gpio is used, and card is present
         */
-       present = mmc_gpio_get_cd(host->mmc);
        if (present < 0) {
                /* If polling, assume that the card is always present. */
                if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
        return !(present_state & SDHCI_DATA_LVL_MASK);
 }
 
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&host->lock, flags);
+       host->flags |= SDHCI_HS400_TUNING;
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       return 0;
+}
+
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        int tuning_loop_counter = MAX_TUNING_LOOP;
        int err = 0;
        unsigned long flags;
+       unsigned int tuning_count = 0;
+       bool hs400_tuning;
 
        sdhci_runtime_pm_get(host);
        spin_lock_irqsave(&host->lock, flags);
 
+       hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+       host->flags &= ~SDHCI_HS400_TUNING;
+
+       if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+               tuning_count = host->tuning_count;
+
        /*
         * The Host Controller needs tuning only in case of SDR104 mode
         * and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * tuning function has to be executed.
         */
        switch (host->timing) {
+       /* HS400 tuning is done in HS200 mode */
        case MMC_TIMING_MMC_HS400:
+               err = -EINVAL;
+               goto out_unlock;
+
        case MMC_TIMING_MMC_HS200:
+               /*
+                * Periodic re-tuning for HS400 is not expected to be needed, so
+                * disable it here.
+                */
+               if (hs400_tuning)
+                       tuning_count = 0;
+               break;
+
        case MMC_TIMING_UHS_SDR104:
                break;
 
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
                /* FALLTHROUGH */
 
        default:
-               spin_unlock_irqrestore(&host->lock, flags);
-               sdhci_runtime_pm_put(host);
-               return 0;
+               goto out_unlock;
        }
 
        if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        }
 
 out:
-       /*
-        * If this is the very first time we are here, we start the retuning
-        * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
-        * flag won't be set, we check this condition before actually starting
-        * the timer.
-        */
-       if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
-           (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+       host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+       if (tuning_count) {
                host->flags |= SDHCI_USING_RETUNING_TIMER;
-               mod_timer(&host->tuning_timer, jiffies +
-                       host->tuning_count * HZ);
-               /* Tuning mode 1 limits the maximum data length to 4MB */
-               mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
-       } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
-               host->flags &= ~SDHCI_NEEDS_RETUNING;
-               /* Reload the new initial value for timer */
-               mod_timer(&host->tuning_timer, jiffies +
-                         host->tuning_count * HZ);
+               mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
        }
 
        /*
@@ -2070,6 +2092,7 @@ out:
 
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
        spin_unlock_irqrestore(&host->lock, flags);
        sdhci_runtime_pm_put(host);
 
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
+       int present;
 
        /* First check if client has provided their own card event */
        if (host->ops->card_event)
                host->ops->card_event(host);
 
+       present = sdhci_do_get_cd(host);
+
        spin_lock_irqsave(&host->lock, flags);
 
        /* Check host->mrq first in case we are runtime suspended */
-       if (host->mrq && !sdhci_do_get_cd(host)) {
+       if (host->mrq && !present) {
                pr_err("%s: Card removed during transfer!\n",
                        mmc_hostname(host->mmc));
                pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = {
        .hw_reset       = sdhci_hw_reset,
        .enable_sdio_irq = sdhci_enable_sdio_irq,
        .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
+       .prepare_hs400_tuning           = sdhci_prepare_hs400_tuning,
        .execute_tuning                 = sdhci_execute_tuning,
        .card_event                     = sdhci_card_event,
        .card_busy      = sdhci_card_busy,
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host)
                mmc->max_segs = SDHCI_MAX_SEGS;
 
        /*
-        * Maximum number of sectors in one transfer. Limited by DMA boundary
-        * size (512KiB).
+        * Maximum number of sectors in one transfer. Limited by SDMA boundary
+        * size (512KiB). Note some tuning modes impose a 4MiB limit, but
+        * 512KiB is the smaller bound anyway.
         */
        mmc->max_req_size = 524288;
 
index e398eda0729832671561490c6d18fce9db485f5e..c8af3ce3ea38d16d4c470ec5773b2d4f088b0f15 100644
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
        schedule_work(&alx->reset_wk);
 }
 
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
 {
        struct alx_rx_queue *rxq = &alx->rxq;
        struct alx_rrd *rrd;
        struct alx_buffer *rxb;
        struct sk_buff *skb;
        u16 length, rfd_cleaned = 0;
+       int work = 0;
 
-       while (budget > 0) {
+       while (work < budget) {
                rrd = &rxq->rrd[rxq->rrd_read_idx];
                if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
                        break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
                                  RRD_NOR) != 1) {
                        alx_schedule_reset(alx);
-                       return 0;
+                       return work;
                }
 
                rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                }
 
                napi_gro_receive(&alx->napi, skb);
-               budget--;
+               work++;
 
 next_pkt:
                if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
        if (rfd_cleaned)
                alx_refill_rx_ring(alx, GFP_ATOMIC);
 
-       return budget > 0;
+       return work;
 }
 
 static int alx_poll(struct napi_struct *napi, int budget)
 {
        struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
        struct alx_hw *hw = &alx->hw;
-       bool complete = true;
        unsigned long flags;
+       bool tx_complete;
+       int work;
 
-       complete = alx_clean_tx_irq(alx) &&
-                  alx_clean_rx_irq(alx, budget);
+       tx_complete = alx_clean_tx_irq(alx);
+       work = alx_clean_rx_irq(alx, budget);
 
-       if (!complete)
-               return 1;
+       if (!tx_complete || work == budget)
+               return budget;
 
        napi_complete(&alx->napi);
 
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
 
        alx_post_write(hw);
 
-       return 0;
+       return work;
 }
 
 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
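
The alx changes (and the dnet ones below) converge on the standard NAPI
contract: the poll routine returns how much RX work it actually did, calls
napi_complete() and re-enables interrupts only when that is strictly less
than the budget, and returns the full budget to stay scheduled otherwise.
A sketch with hypothetical my_rx_one()/my_enable_irqs() helpers:

    struct my_priv {
            struct napi_struct napi;
            /* ... */
    };

    static int my_poll(struct napi_struct *napi, int budget)
    {
            struct my_priv *priv = container_of(napi, struct my_priv, napi);
            int work = 0;

            while (work < budget && my_rx_one(priv))    /* hypothetical */
                    work++;

            if (work < budget) {            /* ring fully drained */
                    napi_complete(napi);
                    my_enable_irqs(priv);   /* hypothetical */
            }
            return work;    /* work == budget keeps polling scheduled */
    }
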
index 553dcd8a9df29f64108285c1f520635e287f7ca8..96bf01ba32dda179b15d2c36d6168581ee43c250 100644
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 }
 
 static void tg3_irq_quiesce(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        int i;
 
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
        tp->irq_sync = 1;
        smp_mb();
 
+       spin_unlock_bh(&tp->lock);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
+
+       spin_lock_bh(&tp->lock);
 }
 
 /* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
 
 /* tp->lock is held. */
 static int tg3_chip_reset(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
        }
        smp_mb();
 
+       tg3_full_unlock(tp);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
 
+       tg3_full_lock(tp, 0);
+
        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
 {
        struct tg3 *tp = (struct tg3 *) __opaque;
 
-       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
-               goto restart_timer;
-
        spin_lock(&tp->lock);
 
+       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+               spin_unlock(&tp->lock);
+               goto restart_timer;
+       }
+
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
 
+       rtnl_lock();
        tg3_full_lock(tp, 0);
 
        if (!netif_running(tp->dev)) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
+               rtnl_unlock();
                return;
        }
 
@@ -11138,6 +11154,7 @@ out:
                tg3_phy_start(tp);
 
        tg3_flag_clear(tp, RESET_TASK_PENDING);
+       rtnl_unlock();
 }
 
 static int tg3_request_irq(struct tg3 *tp, int irq_num)
index 55eb7f2af2b41ccf031f5018c12137daf752dc6c..7ef55f5fa664480ce052720bc55bd5ffb9ca8b57 100644
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
                res = PTR_ERR(lp->pclk);
                goto err_free_dev;
        }
-       clk_enable(lp->pclk);
+       clk_prepare_enable(lp->pclk);
 
        lp->hclk = ERR_PTR(-ENOENT);
        lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
 err_out_unregister_netdev:
        unregister_netdev(dev);
 err_disable_clock:
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
 err_free_dev:
        free_netdev(dev);
        return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
        kfree(lp->mii_bus->irq);
        mdiobus_free(lp->mii_bus);
        unregister_netdev(dev);
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
        free_netdev(dev);
 
        return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
                netif_stop_queue(net_dev);
                netif_device_detach(net_dev);
 
-               clk_disable(lp->pclk);
+               clk_disable_unprepare(lp->pclk);
        }
        return 0;
 }
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
        struct macb *lp = netdev_priv(net_dev);
 
        if (netif_running(net_dev)) {
-               clk_enable(lp->pclk);
+               clk_prepare_enable(lp->pclk);
 
                netif_device_attach(net_dev);
                netif_start_queue(net_dev);
index 2215d432a05958ddb25e6b0d1a4bee27562f748a..a936ee8958c704fa90321a1934b2c7dacbf046a6 100644
@@ -2430,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
         */
        n10g = 0;
        for_each_port(adapter, pidx)
-               n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+               n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
 
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
index 21dc9a20308c58dabef4b77b1fad338547e20df3..60426cf890a774dd07ec939a622f88991c1044f6 100644
@@ -323,6 +323,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
                return v;
 
        v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+       pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+                       FW_PORT_CMD_MDIOADDR_G(v) : -1;
        pi->port_type = FW_PORT_CMD_PTYPE_G(v);
        pi->mod_type = FW_PORT_MOD_TYPE_NA;
 
index a379c3e4b57f73ef3fd133bd4bb5114e29f10cb4..13d00a38a5bd60ed1e7af054f3a22b617e64317d 100644
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                 * break out of while loop if there are no more
                 * packets waiting
                 */
-               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
-                       napi_complete(napi);
-                       int_enable = dnet_readl(bp, INTR_ENB);
-                       int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
-                       dnet_writel(bp, int_enable, INTR_ENB);
-                       return 0;
-               }
+               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+                       break;
 
                cmd_word = dnet_readl(bp, RX_LEN_FIFO);
                pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                               "size %u.\n", dev->name, pkt_len);
        }
 
-       budget -= npackets;
-
        if (npackets < budget) {
                /* We processed all packets available.  Tell NAPI it can
-                * stop polling then re-enable rx interrupts */
+                * stop polling then re-enable rx interrupts.
+                */
                napi_complete(napi);
                int_enable = dnet_readl(bp, INTR_ENB);
                int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
                dnet_writel(bp, int_enable, INTR_ENB);
-               return 0;
        }
 
-       /* There are still packets waiting */
-       return 1;
+       return npackets;
 }
 
 static irqreturn_t dnet_interrupt(int irq, void *dev_id)
index 469691ad4a1ee25dda5ff9dbec0ef8735ae0f6bf..40132929daf7ac7713f2cd0f1f469656616fadfc 100644
@@ -424,6 +424,8 @@ struct bufdesc_ex {
  * (40ns * 6).
  */
 #define FEC_QUIRK_BUG_CAPTURE          (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO          (1 << 11)
 
 struct fec_enet_priv_tx_q {
        int index;
index 5ebdf8dc8a31300f526fd98912080dc4850937f2..bba87775419dc9c0d5adadcdda8c1e634dc8e04d 100644
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx28-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+                               FEC_QUIRK_SINGLE_MDIO,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        int err = -ENXIO, i;
 
        /*
-        * The dual fec interfaces are not equivalent with enet-mac.
+        * The i.MX28 dual fec interfaces are not equivalent.
         * Here are the differences:
         *
         *  - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
-       if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+       if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
                if (mii_cnt && fec0_mii_bus) {
                        fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        mii_cnt++;
 
        /* save fec0 mii_bus */
-       if (fep->quirks & FEC_QUIRK_ENET_MAC)
+       if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
                fec0_mii_bus = fep->mii_bus;
 
        return 0;
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
                pdev->id_entry = of_id->data;
        fep->quirks = pdev->id_entry->driver_data;
 
+       fep->netdev = ndev;
        fep->num_rx_queues = num_rx_qs;
        fep->num_tx_queues = num_tx_qs;
 
index 5b8300a32bf5f5eb1df93d7262b22a00b7d77a9f..4d61ef50b465b73bd4bd87256a2ad47d83d4d666 100644
@@ -281,6 +281,17 @@ config I40E_DCB
 
          If unsure, say N.
 
+config I40E_FCOE
+       bool "Fibre Channel over Ethernet (FCoE)"
+       default n
+       depends on I40E && DCB && FCOE
+       ---help---
+         Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+         in the driver. This will create new netdev for exclusive FCoE
+         in the driver. This will create a new netdev for exclusive FCoE
+         use, with XL710 FCoE offloads enabled.
+         If unsure, say N.
+
 config I40EVF
        tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
        depends on PCI_MSI
index 4b94ddb29c248ed2571c4d90eb37d8c8ecbec935..c405819991214e21a25b5a670d0097482fd643ab 100644
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
        i40e_virtchnl_pf.o
 
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
index 045b5c4b98b38ba74ef68104828351f2a8fdc67c..ad802dd0f67a3d4fdcb6810ebdc8d66b67706d66 100644 (file)
@@ -78,7 +78,7 @@ do {                                                            \
 } while (0)
 
 typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
 #define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
 #endif /* _I40E_OSDEP_H_ */
index 04b441460bbda6e36cb64731d24247a1b954e506..cecb340898fe2a881aac449c7377110ccc56793b 100644 (file)
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
        return le32_to_cpu(*(volatile __le32 *)head);
 }
 
+#define WB_STRIDE 0x3
+
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
  * @tx_ring:  tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
+       /* check to see if there are any non-cache-aligned descriptors
+        * waiting to be written back, and kick the hardware to force
+        * the write-back while napi polling is still active
+        */
+       if (budget &&
+           !((i & WB_STRIDE) == WB_STRIDE) &&
+           !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+           (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+               tx_ring->arm_wb = true;
+       else
+               tx_ring->arm_wb = false;
+
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
                dev_info(tx_ring->dev,
-                        "tx hang detected on queue %d, resetting adapter\n",
+                        "tx hang detected on queue %d, reset requested\n",
                         tx_ring->queue_index);
 
-               tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+               /* do not fire the reset immediately; wait for the stack to
+                * decide we are truly stuck. This also prevents every queue
+                * from simultaneously requesting a reset.
+                */
 
-               /* the adapter is about to reset, no point in enabling stuff */
-               return true;
+               /* the adapter is about to reset, no point in enabling polling */
+               budget = 1;
        }
 
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                }
        }
 
-       return budget > 0;
+       return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on non-cache-aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+       u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+                 /* allow 00 to be written to the index */;
+
+       wr32(&vsi->back->hw,
+            I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+            val);
 }
 
 /**
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
         * The UDP_0 bit *may* be set if the *inner* header is UDP
         */
-       if (ipv4_tunnel &&
-           (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
-           !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+       if (ipv4_tunnel) {
                skb->transport_header = skb->mac_header +
                                        sizeof(struct ethhdr) +
                                        (ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                          skb->protocol == htons(ETH_P_8021AD))
                                          ? VLAN_HLEN : 0;
 
-               rx_udp_csum = udp_csum(skb);
-               iph = ip_hdr(skb);
-               csum = csum_tcpudp_magic(
-                               iph->saddr, iph->daddr,
-                               (skb->len - skb_transport_offset(skb)),
-                               IPPROTO_UDP, rx_udp_csum);
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(
+                                       iph->saddr, iph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, rx_udp_csum);
 
-               if (udp_hdr(skb)->check != csum)
-                       goto checksum_fail;
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
+
+               } /* else it's GRE and so no outer UDP header */
        }
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        struct i40e_vsi *vsi = q_vector->vsi;
        struct i40e_ring *ring;
        bool clean_complete = true;
+       bool arm_wb = false;
        int budget_per_ring;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
-       i40e_for_each_ring(ring, q_vector->tx)
+       i40e_for_each_ring(ring, q_vector->tx) {
                clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+               arm_wb |= ring->arm_wb;
+       }
 
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
        /* If work not completed, return budget and polling will return */
-       if (!clean_complete)
+       if (!clean_complete) {
+               if (arm_wb)
+                       i40e_force_wb(vsi, q_vector);
                return budget;
+       }
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (protocol == htons(ETH_P_IP)) {
-               iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
-       } else if (skb_is_gso_v6(skb)) {
-
-               ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                                          : ipv6_hdr(skb);
+       } else if (ipv6h->version == 6) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
                }
 
                /* Now set the ctx descriptor fields */
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+               if (this_ip_hdr->version == 6) {
+                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
        } else {
                network_hdr_len = skb_network_header_len(skb);
                this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        /* Place RS bit on last descriptor of any packet that spans across the
         * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
         */
-#define WB_STRIDE 0x3
        if (((i & WB_STRIDE) != WB_STRIDE) &&
            (first <= &tx_ring->tx_bi[i]) &&
            (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
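
Several of the i40e hunks revolve around WB_STRIDE: descriptor index i is
considered stride-aligned when (i & WB_STRIDE) == WB_STRIDE, i.e. it is the
fourth slot of a 64-byte cacheline, and the new arm_wb path forces a
write-back only when cleaning stopped on a non-aligned slot. A standalone
sketch of the test, with the ring state reduced to two flags:

    #define WB_STRIDE 0x3

    static bool example_should_arm_wb(unsigned int i, int budget,
                                      bool ring_down, bool ring_idle)
    {
            return budget &&                        /* still NAPI polling */
                   (i & WB_STRIDE) != WB_STRIDE &&  /* stopped off-stride */
                   !ring_down &&                    /* VSI is still up */
                   !ring_idle;                      /* descriptors pending */
    }
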
index e60d3accb2e2ec3f2992b056da718d633b978157..18b00231d2f117d714e7e1399aecba0061ead41a 100644 (file)
@@ -241,6 +241,7 @@ struct i40e_ring {
        unsigned long last_rx_timestamp;
 
        bool ring_active;               /* is ring online or not */
+       bool arm_wb;            /* request a forced descriptor write-back */
 
        /* stats structs */
        struct i40e_queue_stats stats;
index c29ba80ae02bfde60f41f4118c02bab642303b89..37583a9d88534346922b9f3afd9814dd6fe85a64 100644 (file)
@@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
        .mpr            = 1,
@@ -495,6 +496,7 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
        .mpr            = 1,
@@ -536,6 +538,8 @@ static struct sh_eth_cpu_data sh7724_data = {
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
 
+       .trscer_err_mask = DESC_I_RINT8,
+
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
@@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 
        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+       if (!cd->trscer_err_mask)
+               cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
 }
 
 static int sh_eth_check_reset(struct net_device *ndev)
@@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        /* Frame recv control (enable multiple-packets per rx irq) */
        sh_eth_write(ndev, RMCR_RNC, RMCR);
 
-       sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+       sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
 
        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
index 22301bf9c21daeb925d75aa7ce5c7a588d977e11..71f5de1171bd93d004beacf880c387a4168eaada 100644 (file)
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
        DESC_I_RINT1 = 0x0001,
 };
 
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
 /* RPADIR */
 enum RPADIR_BIT {
        RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
        unsigned long tx_check;
        unsigned long eesr_err_check;
 
+       /* Error mask */
+       unsigned long trscer_err_mask;
+
        /* hardware features */
        unsigned long irq_flags; /* IRQ configuration flags */
        unsigned no_psr:1;      /* EtherC DO NOT have PSR */
index e61ee8351272bceda3c98a8fb21fcb216c6f56bb..64d1cef4cda1e52167a36439256fdb7c0d967efc 100644 (file)
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 
                        /* Clear all mcast from ALE */
                        cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
-                                                priv->host_port);
+                                                priv->host_port, -1);
 
                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
+       int vid;
+
+       if (priv->data.dual_emac)
+               vid = priv->slaves[priv->emac_port].port_vlan;
+       else
+               vid = priv->data.default_vlan;
 
        if (ndev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
        cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
 
        /* Clear all mcast from ALE */
-       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+                                vid);
 
        if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;
index 097ebe7077ac0c8de51e3eb7e8da5809f5e6bcea..5246b3a18ff86e8494d30db90c6154ba9f7ed25f 100644 (file)
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
                cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 }
 
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
        int ret, idx;
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
                if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
                        continue;
 
+               /* If the vid passed is -1, remove all multicast entries from
+                * the table irrespective of the vlan id; if a valid vlan id
+                * is passed, remove only the multicast entries added to that
+                * vlan id. If the vlan id doesn't match, move on to the next
+                * entry.
+                */
+               if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+                       continue;
+
                if (cpsw_ale_get_mcast(ale_entry)) {
                        u8 addr[6];
 
index c0d4127aa549285c7e50e47214c1579e17478210..af1e7ecd87c6fbd24b80954c7977e96aa3676a0c 100644 (file)
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
 
 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
                       int flags, u16 vid);
 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
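
The new vid argument treats -1 as a wildcard so existing callers can still
flush every VLAN. A trivial sketch of the match rule applied in the flush
loop above:

    /* vid == -1 matches every VLAN; any other value restricts the
     * flush to entries carrying exactly that VLAN id
     */
    static bool example_vid_matches(int entry_vid, int vid)
    {
            return vid == -1 || entry_vid == vid;
    }
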
index 93e224217e24b36b089102be11ada5921f62d83b..f7ff493f1e73dfa129dbb10ed68c6436c52a4b1b 100644 (file)
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+       if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+       if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
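
Both team work functions now rely on atomic_dec_if_positive(), which
returns the decremented value but refuses to write a result below zero,
closing the underflow window the old atomic_dec_and_test() pattern left
open when the count had already been consumed. A minimal sketch of the
pattern, with the structure and helpers hypothetical:

    static void example_notify_work(struct example *ex)
    {
            /* returns the new value, or a negative value *without*
             * decrementing when the counter was already zero
             */
            int val = atomic_dec_if_positive(&ex->count_pending);

            if (val < 0)
                    return;                /* nothing pending any more */

            example_send_notification(ex); /* assumed notifier call */

            if (val)                       /* more rounds still pending */
                    example_reschedule(ex);
    }
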
index dcb6d33141e0640f545555848434d8efd7822878..1e9cdca370144cffb22141e4c03aa3ff800aefdd 100644 (file)
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
         awd.done = 0;
 
         urb->context = &awd;
-        status = usb_submit_urb(urb, GFP_NOIO);
+        status = usb_submit_urb(urb, GFP_ATOMIC);
         if (status) {
                 // something went wrong
                 usb_free_urb(urb);
index e5be2d21868fa975619b72fb8217cad0f54cefe6..a5f9198d57473e6ab18966ca58ec88047f4428dc 100644 (file)
@@ -69,8 +69,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  10
-#define IWL3160_UCODE_API_MAX  10
+#define IWL7260_UCODE_API_MAX  12
+#define IWL3160_UCODE_API_MAX  12
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   10
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
 #define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_7000         0
 
index bf0a95cb71535f390dfcfcc5820d39901359d440..3668fc57e7708be5e655353078f0bb44589ad6a1 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  10
+#define IWL8000_UCODE_API_MAX  12
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   10
index f2a047f6bb3e519a6ab3ce3032be18aac3aa9aa9..1bbe4fc47b97bcbcd4c828ce094f487c81edd357 100644 (file)
@@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ *     regardless of the band or the number of the probes. FW will calculate
+ *     the actual dwell time.
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
@@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_LMAC_SCAN             = BIT(6),
        IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
+       IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
 };
 
 /**
index 1f2acf47bfb2c78ddb1ee623f9254e35fa8eb109..201846de94e7d949819afacd8f5bac3d9379a6ec 100644 (file)
@@ -672,6 +672,7 @@ struct iwl_scan_channel_opt {
  * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
  * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
  *     and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
  */
 enum iwl_mvm_lmac_scan_flags {
        IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL         = BIT(0),
@@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags {
        IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS   = BIT(4),
        IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED       = BIT(5),
        IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED     = BIT(6),
+       IWL_MVM_LMAC_SCAN_FLAG_MATCH            = BIT(9),
 };
 
 enum iwl_scan_priority {
index e5294d01181e404c44d78190df5ae869d8c0cd21..ec9a8e7bae1de2934d9fddcd26e7d4b481cdafb9 100644 (file)
@@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
  * already included in the probe template, so we need to set only
  * req->n_ssids - 1 bits in addition to the first bit.
  */
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+                                   enum ieee80211_band band, int n_ssids)
 {
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+               return 10;
        if (band == IEEE80211_BAND_2GHZ)
                return 20  + 3 * (n_ssids + 1);
        return 10  + 2 * (n_ssids + 1);
 }
 
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+                                    enum ieee80211_band band)
 {
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+               return 110;
        return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
 
@@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                 */
                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        u32 passive_dwell =
-                               iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+                               iwl_mvm_get_passive_dwell(mvm,
+                                                         IEEE80211_BAND_2GHZ);
                        params->max_out_time = passive_dwell;
                } else {
                        params->passive_fragmented = true;
@@ -348,8 +355,8 @@ not_bound:
                        params->dwell[band].passive = frag_passive_dwell;
                else
                        params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
-               params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+                               iwl_mvm_get_passive_dwell(mvm, band);
+               params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
                                                                      n_ssids);
        }
 }
@@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 
        if (iwl_mvm_scan_pass_all(mvm, req))
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+       else
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 
        if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
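
The dwell-time helpers above now consult a firmware TLV capability bit
before falling back to the band-based formulas: firmware that advertises
IWL_UCODE_TLV_API_BASIC_DWELL computes the effective dwell itself, so the
driver supplies only a basic value. A condensed sketch with the capability
test reduced to a bool and the constants mirroring the hunk:

    static u16 example_active_dwell(bool fw_basic_dwell, bool band_2ghz,
                                    int n_ssids)
    {
            if (fw_basic_dwell)
                    return 10;  /* firmware derives the real dwell time */
            return band_2ghz ? 20 + 3 * (n_ssids + 1)
                             : 10 + 2 * (n_ssids + 1);
    }
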
index 4f15d9decc81bd4fe80bd2ac716667e2c3b97aa1..4333306ccdee75a02952288196897d63a349b6b0 100644 (file)
@@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
        }
 
-       /* tid_tspec will default to 0 = BE when QOS isn't enabled */
-       ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+       /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+       if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+               ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+       else
+               ac = tid_to_mac80211_ac[0];
+
        tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
                        TX_CMD_FLG_BT_PRIO_POS;
 
index e56e77ef5d2e3d28d9a30185ef257aa43912ed23..917431e30f747bf9411c8c33b1f013d18b14f29e 100644 (file)
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
        if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
                return false;
 
-       if (!mvm->cfg->rx_with_siso_diversity)
+       if (mvm->cfg->rx_with_siso_diversity)
                return false;
 
        ieee80211_iterate_active_interfaces_atomic(
index 2f0c4b1703442b85b3a56bd90c0548995e0bc661..d5aadb00dd9e4a6acf4e29c72dd0d55d59d59203 100644 (file)
@@ -527,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        else if (cfg == &iwl7265_n_cfg)
                cfg_7265d = &iwl7265d_n_cfg;
        if (cfg_7265d &&
-           (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+           (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
                cfg = cfg_7265d;
+               iwl_trans->cfg = cfg_7265d;
+       }
 #endif
 
        pci_set_drvdata(pdev, iwl_trans);
index 846a2e6e34d855d62726eda65b51ee427bc1a939..c70efb9a6e78ccf22170bb8bbfe0abe89be695ef 100644 (file)
@@ -666,7 +666,8 @@ tx_status_ok:
 }
 
 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
-                                   u8 *entry, int rxring_idx, int desc_idx)
+                                   struct sk_buff *new_skb, u8 *entry,
+                                   int rxring_idx, int desc_idx)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        u8 tmp_one = 1;
        struct sk_buff *skb;
 
+       if (likely(new_skb)) {
+               skb = new_skb;
+               goto remap;
+       }
        skb = dev_alloc_skb(rtlpci->rxbuffersize);
        if (!skb)
                return 0;
-       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
 
+remap:
        /* just set skb->cb to mapping addr for pci_unmap_single use */
        *((dma_addr_t *)skb->cb) =
                pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        bufferaddress = *((dma_addr_t *)skb->cb);
        if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
                return 0;
+       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
        if (rtlpriv->use_new_trx_flow) {
                rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                            HW_DESC_RX_PREPARE,
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                /*rx pkt */
                struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
                                      rtlpci->rx_ring[rxring_idx].idx];
+               struct sk_buff *new_skb;
 
                if (rtlpriv->use_new_trx_flow) {
                        rx_remained_cnt =
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
                                 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
 
+               /* get a new skb - if allocation fails, the old one is reused */
+               new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+               if (unlikely(!new_skb)) {
+                       pr_err("Allocation of new skb failed in %s\n",
+                              __func__);
+                       goto no_new;
+               }
                if (rtlpriv->use_new_trx_flow) {
                        buffer_desc =
                          &rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        schedule_work(&rtlpriv->works.lps_change_work);
                }
 end:
+               skb = new_skb;
+no_new:
                if (rtlpriv->use_new_trx_flow) {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
                                                 rxring_idx,
-                                              rtlpci->rx_ring[rxring_idx].idx);
+                                                rtlpci->rx_ring[rxring_idx].idx);
                } else {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+                                                rxring_idx,
                                                 rtlpci->rx_ring[rxring_idx].idx);
-
                        if (rtlpci->rx_ring[rxring_idx].idx ==
                            rtlpci->rxringcount - 1)
                                rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
                rtlpci->rx_ring[rxring_idx].idx = 0;
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
 
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
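
The rtlwifi rework allocates the replacement RX buffer before the received
one is handed up the stack, and recycles the old skb when allocation fails,
so an RX descriptor slot is never left without a mapped buffer. A hedged
sketch of the refill strategy, with the DMA-map helper assumed:

    static void example_refill_slot(struct example_ring *ring, int idx,
                                    struct sk_buff *old_skb,
                                    unsigned int bufsize)
    {
            struct sk_buff *skb = dev_alloc_skb(bufsize);

            if (!skb) {
                    /* recycle: the received packet is dropped, but the
                     * slot keeps a valid buffer for the next interrupt
                     */
                    skb = old_skb;
            }
            example_map_and_publish(ring, idx, skb); /* assumed helper */
    }
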
index 22bcb4e12e2a1318fc1802fb3c5ff6b2cb4acf92..d8c10764f13061fd355e0e36abff7c83d3a458a5 100644 (file)
@@ -88,10 +88,8 @@ struct netfront_cb {
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
 struct netfront_stats {
-       u64                     rx_packets;
-       u64                     tx_packets;
-       u64                     rx_bytes;
-       u64                     tx_bytes;
+       u64                     packets;
+       u64                     bytes;
        struct u64_stats_sync   syncp;
 };
 
@@ -160,7 +158,8 @@ struct netfront_info {
        struct netfront_queue *queues;
 
        /* Statistics */
-       struct netfront_stats __percpu *stats;
+       struct netfront_stats __percpu *rx_stats;
+       struct netfront_stats __percpu *tx_stats;
 
        atomic_t rx_gso_checksum_fixup;
 };
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
-       struct netfront_stats *stats = this_cpu_ptr(np->stats);
+       struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
        struct xen_netif_tx_request *tx;
        char *data = skb->data;
        RING_IDX i;
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
 
-       u64_stats_update_begin(&stats->syncp);
-       stats->tx_bytes += skb->len;
-       stats->tx_packets++;
-       u64_stats_update_end(&stats->syncp);
+       u64_stats_update_begin(&tx_stats->syncp);
+       tx_stats->bytes += skb->len;
+       tx_stats->packets++;
+       u64_stats_update_end(&tx_stats->syncp);
 
        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(queue);
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 static int handle_incoming_queue(struct netfront_queue *queue,
                                 struct sk_buff_head *rxq)
 {
-       struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+       struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
        int packets_dropped = 0;
        struct sk_buff *skb;
 
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
                        continue;
                }
 
-               u64_stats_update_begin(&stats->syncp);
-               stats->rx_packets++;
-               stats->rx_bytes += skb->len;
-               u64_stats_update_end(&stats->syncp);
+               u64_stats_update_begin(&rx_stats->syncp);
+               rx_stats->packets++;
+               rx_stats->bytes += skb->len;
+               u64_stats_update_end(&rx_stats->syncp);
 
                /* Pass it up. */
                napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+               struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+               struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;
 
                do {
-                       start = u64_stats_fetch_begin_irq(&stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+                       tx_packets = tx_stats->packets;
+                       tx_bytes = tx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
 
-                       rx_packets = stats->rx_packets;
-                       tx_packets = stats->tx_packets;
-                       rx_bytes = stats->rx_bytes;
-                       tx_bytes = stats->tx_bytes;
-               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+                       rx_packets = rx_stats->packets;
+                       rx_bytes = rx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
 
                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
 #endif
 };
 
+static void xennet_free_netdev(struct net_device *netdev)
+{
+       struct netfront_info *np = netdev_priv(netdev);
+
+       free_percpu(np->rx_stats);
+       free_percpu(np->tx_stats);
+       free_netdev(netdev);
+}
+
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
        int err;
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        np->queues = NULL;
 
        err = -ENOMEM;
-       np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
-       if (np->stats == NULL)
+       np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->rx_stats == NULL)
+               goto exit;
+       np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->tx_stats == NULL)
                goto exit;
 
        netdev->netdev_ops      = &xennet_netdev_ops;
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        return netdev;
 
  exit:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        return ERR_PTR(err);
 }
 
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
        return 0;
 
  fail:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
        return err;
 }
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
                info->queues = NULL;
        }
 
-       free_percpu(info->stats);
-
-       free_netdev(info->netdev);
+       xennet_free_netdev(info->netdev);
 
        return 0;
 }
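
Splitting netfront_stats into separate rx and tx per-cpu structures gives
each direction its own u64_stats_sync sequence counter, which matters
because the tx counters are bumped from start_xmit while the rx counters
are bumped from NAPI context. The reader side follows the standard retry
loop, as in this minimal sketch mirroring the structure above:

    struct example_stats {
            u64 packets;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    static void example_read_stats(struct example_stats *s,
                                   u64 *packets, u64 *bytes)
    {
            unsigned int start;

            /* retry the snapshot until the sequence count is stable */
            do {
                    start = u64_stats_fetch_begin_irq(&s->syncp);
                    *packets = s->packets;
                    *bytes   = s->bytes;
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }
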
index 91e97ec0141892cbf4d1676480d5fda3223b0e6b..4d41bf75c23318577638fa45493aab748e0473a7 100644 (file)
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
  */
 static inline int ap_test_config_domain(unsigned int domain)
 {
-       if (!ap_configuration)
-               return 1;
-       return ap_test_config(ap_configuration->aqm, domain);
+       if (!ap_configuration) {  /* QCI not supported */
+               /* then domains 0...15 are configured */
+               return domain < 16 ? 1 : 0;
+       }
+       return ap_test_config(ap_configuration->aqm, domain);
 }
 
 /**
index 55f6774f706f729b92fb2045abb7f9a33740c2b4..aebde3289c50de6722062dfdea21fa1c549090cd 100644 (file)
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                goto reject;
        }
        if (!strncmp("=All", text_ptr, 4)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+               cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
        } else if (!strncmp("=iqn.", text_ptr, 5) ||
                   !strncmp("=eui.", text_ptr, 5)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+               cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
        } else {
                pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
                goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                return -ENOMEM;
        }
        /*
-        * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+        * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
         * explicit case.
         */
-       if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+       if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
                text_ptr = strchr(text_in, '=');
                if (!text_ptr) {
                        pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 
        spin_lock(&tiqn_lock);
        list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
-               if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+               if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
                     strcmp(tiqn->tiqn, text_ptr)) {
                        continue;
                }
@@ -3512,7 +3512,7 @@ eob:
                if (end_of_buf)
                        break;
 
-               if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+               if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
                        break;
        }
        spin_unlock(&tiqn_lock);
index 09a522bae222d190ec92e157a42f13d2e361da4a..cbcff38ac9b7d30cf5b21882efeb34bb4fb39641 100644 (file)
@@ -135,8 +135,8 @@ enum cmd_flags_table {
        ICF_CONTIG_MEMORY                       = 0x00000020,
        ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
        ICF_OOO_CMDSN                           = 0x00000080,
-       IFC_SENDTARGETS_ALL                     = 0x00000100,
-       IFC_SENDTARGETS_SINGLE                  = 0x00000200,
+       ICF_SENDTARGETS_ALL                     = 0x00000100,
+       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
 };
 
 /* struct iscsi_cmd->i_state */
index 7653cfb027a200cbec0dd51c95047708837c7227..58f49ff69b1424bf5feb33ed64eba495d8826851 100644 (file)
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 }
 EXPORT_SYMBOL(se_dev_set_queue_depth);
 
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
-       int block_size = dev->dev_attrib.block_size;
-
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device"
-                       " fabric_max_sectors while export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       if (!fabric_max_sectors) {
-               pr_err("dev[%p]: Illegal ZERO value for"
-                       " fabric_max_sectors\n", dev);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
-                       " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
-                               DA_STATUS_MAX_SECTORS_MIN);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-                       " greater than DA_STATUS_MAX_SECTORS_MAX:"
-                       " %u\n", dev, fabric_max_sectors,
-                       DA_STATUS_MAX_SECTORS_MAX);
-               return -EINVAL;
-       }
-       /*
-        * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-        */
-       if (!block_size) {
-               block_size = 512;
-               pr_warn("Defaulting to 512 for zero block_size\n");
-       }
-       fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
-                                                     block_size);
-
-       dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
-       pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-                       dev, fabric_max_sectors);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
        if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                        dev, dev->export_count);
                return -EINVAL;
        }
-       if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+       if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-                       " greater than fabric_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+                       " greater than hw_max_sectors: %u\n", dev,
+                       optimal_sectors, dev->dev_attrib.hw_max_sectors);
                return -EINVAL;
        }
 
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->dev_attrib.unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
-       dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-       dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 
        xcopy_lun = &dev->xcopy_lun;
        xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
+       dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();
index c2aea099ea4adf7c0ee60ac7949fa02089162932..d836de200a03bcf24be54004df89c7d6d5039030 100644 (file)
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        struct fd_prot fd_prot;
        sense_reason_t rc;
        int ret = 0;
-
+       /*
+        * We are currently limited by the number of iovecs (2048) per
+        * single vfs_[writev,readv] call.
+        */
+       if (cmd->data_length > FD_MAX_BYTES) {
+               pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+                      "FD_MAX_BYTES: %u iovec count limitiation\n",
+                       cmd->data_length, FD_MAX_BYTES);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
        &fileio_dev_attrib_hw_block_size.attr,
        &fileio_dev_attrib_block_size.attr,
        &fileio_dev_attrib_hw_max_sectors.attr,
-       &fileio_dev_attrib_fabric_max_sectors.attr,
        &fileio_dev_attrib_optimal_sectors.attr,
        &fileio_dev_attrib_hw_queue_depth.attr,
        &fileio_dev_attrib_queue_depth.attr,
index 3efff94fbd9788838f565218965d180b78a67604..78346b850968ed8da28d88f35cf6a3ac15512a1b 100644 (file)
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
        q = bdev_get_queue(bd);
 
        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
-       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
        &iblock_dev_attrib_hw_block_size.attr,
        &iblock_dev_attrib_block_size.attr,
        &iblock_dev_attrib_hw_max_sectors.attr,
-       &iblock_dev_attrib_fabric_max_sectors.attr,
        &iblock_dev_attrib_optimal_sectors.attr,
        &iblock_dev_attrib_hw_queue_depth.attr,
        &iblock_dev_attrib_queue_depth.attr,
index d56f2aaba9af9a6bb4b89d5c1080d426cba84e63..283cf786ef98be3d0594e847cc9749a072986b80 100644 (file)
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
 
                        return 0;
                }
+       } else if (we && registered_nexus) {
+               /*
+                * Reads are allowed for Write Exclusive locks
+                * from all registrants.
+                */
+               if (cmd->data_direction == DMA_FROM_DEVICE) {
+                       pr_debug("Allowing READ CDB: 0x%02x for %s"
+                               " reservation\n", cdb[0],
+                               core_scsi3_pr_dump_type(pr_reg_type));
+
+                       return 0;
+               }
        }
        pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
                " for %s reservation\n", transport_dump_cmd_direction(cmd),
index 60ebd170a561943be8bde26cd40112bbd22d1e8e..98e83ac5661bcfe5b3b7b98d6c9fbf21bb27c14e 100644 (file)
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
        &rd_mcp_dev_attrib_hw_block_size.attr,
        &rd_mcp_dev_attrib_block_size.attr,
        &rd_mcp_dev_attrib_hw_max_sectors.attr,
-       &rd_mcp_dev_attrib_fabric_max_sectors.attr,
        &rd_mcp_dev_attrib_optimal_sectors.attr,
        &rd_mcp_dev_attrib_hw_queue_depth.attr,
        &rd_mcp_dev_attrib_queue_depth.attr,
index 11bea1952435a397172ce69804a1088567bde03f..cd4bed7b27579b14a0f6e517eed23dacb6c1fe02 100644 (file)
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 
        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
-
-               if (sectors > dev->dev_attrib.fabric_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds fabric_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.fabric_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-               if (sectors > dev->dev_attrib.hw_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds backend hw_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.hw_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
 check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
                if (cmd->t_task_lba + sectors > end_lba) {
index 1307600fe7264cb55234b6b8e88d8cc15878c799..4c71657da56ab3cdc96b5c1f8d784722f10e2c1d 100644 (file)
@@ -505,7 +505,6 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
-       u32 max_sectors;
        int have_tp = 0;
        int opt, min;
 
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Set MAXIMUM TRANSFER LENGTH
         */
-       max_sectors = min(dev->dev_attrib.fabric_max_sectors,
-                         dev->dev_attrib.hw_max_sectors);
-       put_unaligned_be32(max_sectors, &buf[8]);
+       put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
index 8bfa61c9693dbef6fe6267e16a48c566eae6ac4a..1157b559683b1ff437f9eba74626a90f517d0a1e 100644 (file)
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
        &tcmu_dev_attrib_hw_block_size.attr,
        &tcmu_dev_attrib_block_size.attr,
        &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_fabric_max_sectors.attr,
        &tcmu_dev_attrib_optimal_sectors.attr,
        &tcmu_dev_attrib_hw_queue_depth.attr,
        &tcmu_dev_attrib_queue_depth.attr,
index c1188ac053c9650ce163a3e9e37f34a877959dc2..2ccbc0788353e9488e3f730de80d52ecafaf9082 100644 (file)
@@ -608,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
        regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
        data->mode = THERMAL_DEVICE_DISABLED;
+       clk_disable_unprepare(data->thermal_clk);
 
        return 0;
 }
@@ -617,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
        struct imx_thermal_data *data = dev_get_drvdata(dev);
        struct regmap *map = data->tempmon;
 
+       clk_prepare_enable(data->thermal_clk);
        /* Enabled thermal sensor after resume */
        regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
index 231cabc16e160e7318b4339a9d3d5210ff8b2ec3..2c2ec7666eb182c44c24225609ab9ecd723ea04a 100644 (file)
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
                        continue;
 
                result = acpi_bus_get_device(trt->source, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get source ACPI device\n");
 
                result = acpi_bus_get_device(trt->target, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get target ACPI device\n");
        }
 
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
 
                if (art->source) {
                        result = acpi_bus_get_device(art->source, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
                                pr_warn("Failed to get source ACPI device\n");
                }
                if (art->target) {
                        result = acpi_bus_get_device(art->target, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
+                               pr_warn("Failed to get target ACPI device\n");
                }
        }
index 31bb553aac2633e6ca96ad6c115fe409c610bc35..0fe5dbbea9687053b835ee12eae553930b5ce1f9 100644 (file)
@@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev,
        int ret;
 
        adev = ACPI_COMPANION(dev);
+       if (!adev)
+               return -ENODEV;
 
        status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
        if (ACPI_FAILURE(status))
index e145b66df444e65bb5cc4f9d7e7ed4dce7f76435..d717f3dab6f1410fc955daefb0497c2096298b56 100644 (file)
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
  *
  * Return: pointer to trip points table, NULL otherwise
  */
-const struct thermal_trip * const
+const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *tz)
 {
        struct __thermal_zone *data = tz->devdata;
index 8803e693fe6868a620b76abda347e0e27645d6d3..2580a4872f90febeb5af00136e16054bb59e4903 100644 (file)
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
        struct mutex lock;
        struct list_head list;
        int id;
-       int ctemp;
+       u32 ctemp;
 };
 
 #define rcar_thermal_for_each_priv(pos, common)        \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
 {
        struct device *dev = rcar_priv_to_dev(priv);
        int i;
-       int ctemp, old, new;
+       u32 ctemp, old, new;
        int ret = -EINVAL;
 
        mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        int i;
        int ret = -ENODEV;
        int idle = IDLE_INTERVAL;
+       u32 enr_bits = 0;
 
        common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
        if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
                /*
                 * platform has IRQ support.
-                * Then, drier use common register
+                * Then, driver uses common registers
                 */
 
                ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                if (IS_ERR(common->base))
                        return PTR_ERR(common->base);
 
-               /* enable temperature comparation */
-               rcar_thermal_common_write(common, ENR, 0x00030303);
-
                idle = 0; /* polling delay is not needed */
        }
 
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                        rcar_thermal_irq_enable(priv);
 
                list_move_tail(&priv->list, &common->head);
+
+               /* update ENR bits */
+               enr_bits |= 3 << (i * 8);
        }
 
+       /* enable temperature comparison */
+       if (irq)
+               rcar_thermal_common_write(common, ENR, enr_bits);
+
        platform_set_drvdata(pdev, common);
 
        dev_info(dev, "%d sensors probed\n", i);
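
Instead of unconditionally writing the fixed 0x00030303 enable mask, the
probe path above accumulates two enable bits per successfully probed sensor
(one byte per sensor) and performs a single write at the end. A small
sketch of the mask construction:

    /* each sensor owns one byte of ENR; every probed sensor
     * contributes its two enable bits (value 3) to the mask
     */
    static u32 example_build_enr_mask(int nr_probed)
    {
            u32 enr_bits = 0;
            int i;

            for (i = 0; i < nr_probed; i++)
                    enr_bits |= 3 << (i * 8);
            return enr_bits;
    }
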
index 9083e75206236c1953e4ad5d7858fbc5f164fad0..0531c752fbbb6680c40e939ad2a14fdc1830f357 100644 (file)
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
 void of_thermal_destroy_zones(void);
 int of_thermal_get_ntrips(struct thermal_zone_device *);
 bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip * const
+const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *);
 #else
 static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
 {
        return 0;
 }
-static inline const struct thermal_trip * const
+static inline const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *tz)
 {
        return NULL;
index 01c01cb3933fdfc5bce7e9c457d128d2e8f460e4..d695b1673ae532d9ac873bdc5661ccab84995c04 100644 (file)
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
        return 0;
 }
 
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+       switch (attr) {
+       case VIRTIO_SCSI_S_SIMPLE:
+               return TCM_SIMPLE_TAG;
+       case VIRTIO_SCSI_S_ORDERED:
+               return TCM_ORDERED_TAG;
+       case VIRTIO_SCSI_S_HEAD:
+               return TCM_HEAD_TAG;
+       case VIRTIO_SCSI_S_ACA:
+               return TCM_ACA_TAG;
+       default:
+               break;
+       }
+       return TCM_SIMPLE_TAG;
+}
+
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
        struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
-                       cmd->tvc_task_attr, cmd->tvc_data_direction,
-                       TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-                       NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+                       vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+                       cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+                       sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+                       cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
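
The new vhost_scsi_to_tcm_attr() exists because the virtio-scsi task-attribute
constants and the target core's TCM_*_TAG values are distinct number spaces,
so cmd->tvc_task_attr cannot be handed to target_submit_cmd_map_sgls() raw;
unrecognized values now degrade safely to a simple tag. A small usage sketch:

    int a = vhost_scsi_to_tcm_attr(VIRTIO_SCSI_S_ORDERED); /* TCM_ORDERED_TAG */
    int b = vhost_scsi_to_tcm_attr(0x7f);                  /* TCM_SIMPLE_TAG  */
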
index 1c29bd19e3d5fe9954153a8b821058a1be5ff6d5..0e5fde1d3ffbe5a152035f33063afa98bf84f33e 100644 (file)
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
                err = broadsheet_spiflash_read_range(par, start_sector_addr,
                                                data_start_addr, sector_buffer);
                if (err)
-                       return err;
+                       goto out;
        }
 
        /* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
                err = broadsheet_spiflash_read_range(par, tail_start_addr,
                        tail_len, sector_buffer + tail_start_addr);
                if (err)
-                       return err;
+                       goto out;
        }
 
        /* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
        /* first erase the sector */
        err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
        if (err)
-               return err;
+               goto out;
 
        /* now write it */
        err = broadsheet_spiflash_write_sector(par, start_sector_addr,
                                        sector_buffer, sector_size);
+out:
+       kfree(sector_buffer);
        return err;
 }
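
All three converted returns in broadsheet_spiflash_rewrite_sector() previously
leaked sector_buffer on error; routing them through the new out: label frees
it on every path. The shape of the fix, as a sketch with hypothetical helpers:

    char *buf = kzalloc(size, GFP_KERNEL);
    int err;

    if (!buf)
            return -ENOMEM;

    err = step_one(buf);
    if (err)
            goto out;       /* was "return err;", which leaked buf */

    err = step_two(buf);
    out:
    kfree(buf);             /* single exit frees buf on all paths */
    return err;
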
 
index 92cac803dee3c2261655a34a45a6a0c2d170636f..1085c0432158c02038aba6f961ea7e300f7f950b 100644 (file)
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
        if (ret)
                return ret;
 
-       if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+       if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
                for_each_child_of_node(of_chosen, np) {
                        if (of_device_is_compatible(np, "simple-framebuffer"))
                                of_platform_device_create(np, NULL, NULL);
index ba1107977f2ecafa96cafc04f6498b3fb79a3145..ed19a7d622fa35decaa08b10e83b8bdee8712419 100644 (file)
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
        req->in.h.pid = current->pid;
 }
 
+void fuse_set_initialized(struct fuse_conn *fc)
+{
+       /* Make sure stores before this are seen on another CPU */
+       smp_wmb();
+       fc->initialized = 1;
+}
+
 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
 {
        return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                if (intr)
                        goto out;
        }
+       /* Matches smp_wmb() in fuse_set_initialized() */
+       smp_rmb();
 
        err = -ENOTCONN;
        if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 
        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, fc->initialized);
+       /* Matches smp_wmb() in fuse_set_initialized() */
+       smp_rmb();
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 }
 EXPORT_SYMBOL_GPL(fuse_request_send);
 
+static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
+{
+       if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
+               args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+
+       if (fc->minor < 9) {
+               switch (args->in.h.opcode) {
+               case FUSE_LOOKUP:
+               case FUSE_CREATE:
+               case FUSE_MKNOD:
+               case FUSE_MKDIR:
+               case FUSE_SYMLINK:
+               case FUSE_LINK:
+                       args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+                       break;
+               case FUSE_GETATTR:
+               case FUSE_SETATTR:
+                       args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+                       break;
+               }
+       }
+       if (fc->minor < 12) {
+               switch (args->in.h.opcode) {
+               case FUSE_CREATE:
+                       args->in.args[0].size = sizeof(struct fuse_open_in);
+                       break;
+               case FUSE_MKNOD:
+                       args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+                       break;
+               }
+       }
+}
+
 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
 {
        struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       /* Needs to be done after fuse_get_req() so that fc->minor is valid */
+       fuse_adjust_compat(fc, args);
+
        req->in.h.opcode = args->in.h.opcode;
        req->in.h.nodeid = args->in.h.nodeid;
        req->in.numargs = args->in.numargs;
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
        if (fc->connected) {
                fc->connected = 0;
                fc->blocked = 0;
-               fc->initialized = 1;
+               fuse_set_initialized(fc);
                end_io_requests(fc);
                end_queued_requests(fc);
                end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
                spin_lock(&fc->lock);
                fc->connected = 0;
                fc->blocked = 0;
-               fc->initialized = 1;
+               fuse_set_initialized(fc);
                end_queued_requests(fc);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
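
fuse_set_initialized() and the new smp_rmb() calls form a standard
publish/subscribe barrier pair: the writer orders its setup stores before the
fc->initialized flag, and a reader that observes the flag orders its later
loads after it. The same pattern in isolation, as a kernel-style sketch with
hypothetical names:

    struct conn {
            int ready;
            int value;
    };

    static void publish(struct conn *c)
    {
            c->value = 42;          /* setup stores ...            */
            smp_wmb();              /* ... ordered before the flag */
            c->ready = 1;
    }

    static int consume(struct conn *c)
    {
            if (!c->ready)
                    return -EAGAIN;
            smp_rmb();              /* pairs with smp_wmb() above  */
            return c->value;        /* sees 42, never stale data   */
    }
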
index 252b8a5de8b57f71b841d1fc64c9f48e78b641c2..08e7b1a9d5d0edaca8b94ef386d9200078958df3 100644 (file)
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
        args->in.args[0].size = name->len + 1;
        args->in.args[0].value = name->name;
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(struct fuse_entry_out);
+       args->out.args[0].size = sizeof(struct fuse_entry_out);
        args->out.args[0].value = outarg;
 }
 
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
        args.in.h.opcode = FUSE_CREATE;
        args.in.h.nodeid = get_node_id(dir);
        args.in.numargs = 2;
-       args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
-                                               sizeof(inarg);
+       args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.in.args[1].size = entry->d_name.len + 1;
        args.in.args[1].value = entry->d_name.name;
        args.out.numargs = 2;
-       if (fc->minor < 9)
-               args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args.out.args[0].size = sizeof(outentry);
+       args.out.args[0].size = sizeof(outentry);
        args.out.args[0].value = &outentry;
        args.out.args[1].size = sizeof(outopen);
        args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
        memset(&outarg, 0, sizeof(outarg));
        args->in.h.nodeid = get_node_id(dir);
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(outarg);
+       args->out.args[0].size = sizeof(outarg);
        args->out.args[0].value = &outarg;
        err = fuse_simple_request(fc, args);
        if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
        inarg.umask = current_umask();
        args.in.h.opcode = FUSE_MKNOD;
        args.in.numargs = 2;
-       args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
-                                               sizeof(inarg);
+       args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.in.args[1].size = entry->d_name.len + 1;
        args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
        args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.out.numargs = 1;
-       if (fc->minor < 9)
-               args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-       else
-               args.out.args[0].size = sizeof(outarg);
+       args.out.args[0].size = sizeof(outarg);
        args.out.args[0].value = &outarg;
        err = fuse_simple_request(fc, &args);
        if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
        args->in.args[0].size = sizeof(*inarg_p);
        args->in.args[0].value = inarg_p;
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(*outarg_p);
+       args->out.args[0].size = sizeof(*outarg_p);
        args->out.args[0].value = outarg_p;
 }
 
index e0fc6725d1d0d66a4c3c7dce595239631ba353b1..1cdfb07c1376b4f4b5633e86fdbdfc4320953de2 100644 (file)
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
 int fuse_do_setattr(struct inode *inode, struct iattr *attr,
                    struct file *file);
 
+void fuse_set_initialized(struct fuse_conn *fc);
+
 #endif /* _FS_FUSE_I_H */
index 6749109f255da69a5c24825aab1f2a25140fbb47..f38256e4476ed2a9101480342bcd0fd90a99fd38 100644 (file)
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
        args.in.h.opcode = FUSE_STATFS;
        args.in.h.nodeid = get_node_id(dentry->d_inode);
        args.out.numargs = 1;
-       args.out.args[0].size =
-               fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
+       args.out.args[0].size = sizeof(outarg);
        args.out.args[0].value = &outarg;
        err = fuse_simple_request(fc, &args);
        if (!err)
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                fc->max_write = max_t(unsigned, 4096, fc->max_write);
                fc->conn_init = 1;
        }
-       fc->initialized = 1;
+       fuse_set_initialized(fc);
        wake_up_all(&fc->blocked_waitq);
 }
 
index e94c887da2d72f7043ed010bfa8675b4ad825bb6..55505cbe11afa165ec90ec934301c15a1b9a4314 100644 (file)
@@ -138,10 +138,6 @@ lockd(void *vrqstp)
 
        dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-       if (!nlm_timeout)
-               nlm_timeout = LOCKD_DFLT_TIMEO;
-       nlmsvc_timeout = nlm_timeout * HZ;
-
        /*
         * The main request loop. We don't terminate until the last
         * NFS mount or NFS daemon has gone away.
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
                printk(KERN_WARNING
                        "lockd_up: no pid, %d users??\n", nlmsvc_users);
 
+       if (!nlm_timeout)
+               nlm_timeout = LOCKD_DFLT_TIMEO;
+       nlmsvc_timeout = nlm_timeout * HZ;
+
        serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
        if (!serv) {
                printk(KERN_WARNING "lockd_up: create service failed\n");
index 735b8d3fa78c92bf746aff05475be1fa2a82abd6..59e2f905e4ffea324dbf44faf1b666974adc6c23 100644 (file)
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
                        break;
        }
        trace_generic_delete_lease(inode, fl);
-       if (fl)
+       if (fl && IS_LEASE(fl))
                error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
        spin_unlock(&inode->i_lock);
        locks_dispose_list(&dispose);
index 03311259b0c45c88de37122cffc0f28c8f6c7e63..953daa44a28232d6863da375e59d44a0b42f49b6 100644 (file)
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
        kfree(clp->cl_serverowner);
        kfree(clp->cl_serverscope);
        kfree(clp->cl_implid);
+       kfree(clp->cl_owner_id);
 }
 
 void nfs4_free_client(struct nfs_client *clp)
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep,
        spin_unlock(&nn->nfs_client_lock);
 }
 
+static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
+               const struct nfs_client *clp2)
+{
+       if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL)
+               return true;
+       return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
+}
+
 /**
  * nfs40_walk_client_list - Find server that recognizes a client ID
  *
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
 
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
                if (pos->cl_minorversion != new->cl_minorversion)
                        continue;
 
@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->cl_clientid != new->cl_clientid)
                        continue;
 
+               if (!nfs4_match_client_owner_id(pos, new))
+                       continue;
+
                atomic_inc(&pos->cl_count);
                spin_unlock(&nn->nfs_client_lock);
 
@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
 }
 
 /*
- * Returns true if the server owners match
+ * Returns true if the server major ids match
  */
 static bool
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
 {
        struct nfs41_server_owner *o1 = a->cl_serverowner;
        struct nfs41_server_owner *o2 = b->cl_serverowner;
 
-       if (o1->minor_id != o2->minor_id) {
-               dprintk("NFS: --> %s server owner minor IDs do not match\n",
-                       __func__);
-               return false;
-       }
-
        if (o1->major_id_sz != o2->major_id_sz)
                goto out_major_mismatch;
        if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
 
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
                if (pos->cl_minorversion != new->cl_minorversion)
                        continue;
 
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (!nfs4_match_clientids(pos, new))
                        continue;
 
-               if (!nfs4_match_serverowners(pos, new))
+               /*
+                * Note that session trunking is just a special subcase of
+                * client id trunking. In either case, we want to fall back
+                * to using the existing nfs_client.
+                */
+               if (!nfs4_check_clientid_trunking(pos, new))
+                       continue;
+
+               /* Unlike NFSv4.0, we know that NFSv4.1 always uses the
+                * uniform string, however someone might switch the
+                * uniquifier string on us.
+                */
+               if (!nfs4_match_client_owner_id(pos, new))
                        continue;
 
                atomic_inc(&pos->cl_count);
index e7f8d5ff2581a98269a262998beb43ccaca23e3c..c347705b016104de8b0360f163e87c19d9b4d78c 100644 (file)
@@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
                return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
-       if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
-               return 0;
        if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
                return 0;
        nfs_mark_delegation_referenced(delegation);
@@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
 }
 
 static unsigned int
-nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+nfs4_init_nonuniform_client_string(struct nfs_client *clp,
                                   char *buf, size_t len)
 {
        unsigned int result;
 
+       if (clp->cl_owner_id != NULL)
+               return strlcpy(buf, clp->cl_owner_id, len);
+
        rcu_read_lock();
        result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
                                clp->cl_ipaddr,
@@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
                                rpc_peeraddr2str(clp->cl_rpcclient,
                                                        RPC_DISPLAY_PROTO));
        rcu_read_unlock();
+       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
        return result;
 }
 
 static unsigned int
-nfs4_init_uniform_client_string(const struct nfs_client *clp,
+nfs4_init_uniform_client_string(struct nfs_client *clp,
                                char *buf, size_t len)
 {
        const char *nodename = clp->cl_rpcclient->cl_nodename;
+       unsigned int result;
+
+       if (clp->cl_owner_id != NULL)
+               return strlcpy(buf, clp->cl_owner_id, len);
 
        if (nfs4_client_id_uniquifier[0] != '\0')
-               return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
+               result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
                                clp->rpc_ops->version,
                                clp->cl_minorversion,
                                nfs4_client_id_uniquifier,
                                nodename);
-       return scnprintf(buf, len, "Linux NFSv%u.%u %s",
+       else
+               result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
                                clp->rpc_ops->version, clp->cl_minorversion,
                                nodename);
+       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
+       return result;
 }
 
 /*
index 08848050922e613f28a1d8f7e4b3ad4c34b463be..db284bff29dcceb39360d458cec3a194745955f8 100644 (file)
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
-       tlb->start = TASK_SIZE;
-       tlb->end = 0;
+       if (tlb->fullmm) {
+               tlb->start = tlb->end = ~0;
+       } else {
+               tlb->start = TASK_SIZE;
+               tlb->end = 0;
+       }
 }
 
 /*
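
Together with the mm/memory.c hunk later in this diff, which moves the
"if (!tlb->end) return;" check into tlb_flush_mmu_tlbonly(), this keeps a
full-mm teardown from looking like an empty gather: fullmm now starts with
start == end == ~0, so tlb->end is non-zero and the TLB flush always runs,
while a partial gather keeps end == 0 until __tlb_adjust_range() records a
real range. As a sketch, the flush condition reduces to:

    static bool gather_needs_flush(const struct mmu_gather *tlb)
    {
            /* fullmm:  end == ~0, so always flush;
             * partial: end stays 0 until a range was adjusted in */
            return tlb->end != 0;
    }
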
index 8aded9ab2e4e89ddb66e5920c0819bade6fb0760..5735e7130d630f94fe62d3e0a7a7078434c8ee12 100644 (file)
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
        unsigned long           flags;          /* BLK_MQ_F_* flags */
 
        struct request_queue    *queue;
-       unsigned int            queue_num;
        struct blk_flush_queue  *fq;
 
        void                    *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
        unsigned int            numa_node;
-       unsigned int            cmd_size;       /* per-request extra data */
+       unsigned int            queue_num;
 
        atomic_t                nr_active;
 
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
                void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
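
The newly exported helpers let a driver quiesce and recover a queue from its
own reset path: blk_mq_freeze_queue_start()/blk_mq_unfreeze_queue() bracket a
reconfiguration, blk_mq_cancel_requeue_work() and blk_mq_abort_requeue_list()
drain the requeue machinery, and blk_mq_request_started() lets a timeout
handler skip requests that never reached the hardware. A hedged sketch of a
hypothetical driver reset using only the functions declared above:

    static void my_dev_reset(struct request_queue *q)
    {
            blk_mq_freeze_queue_start(q);   /* stop new requests entering */
            blk_mq_cancel_requeue_work(q);  /* no re-dispatch behind us   */
            blk_mq_abort_requeue_list(q);   /* fail whatever is parked    */

            /* ... reset the hardware and deal with in-flight commands,
             * waiting for them to drain as a real driver must ... */

            blk_mq_unfreeze_queue(q);       /* let I/O flow again */
    }
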
index 445d59231bc4cc242e2aa0e1ca073e3c320af56c..c294e3e25e37a50a953a4a0bb3cb1aa66ba904d9 100644 (file)
@@ -190,6 +190,7 @@ enum rq_flag_bits {
        __REQ_PM,               /* runtime pm request */
        __REQ_HASHED,           /* on IO scheduler merge hash */
        __REQ_MQ_INFLIGHT,      /* track inflight for MQ */
+       __REQ_NO_TIMEOUT,       /* requests may never expire */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM                 (1ULL << __REQ_PM)
 #define REQ_HASHED             (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT                (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT         (1ULL << __REQ_NO_TIMEOUT)
 
 #endif /* __LINUX_BLK_TYPES_H */
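
REQ_NO_TIMEOUT exempts a request from the block layer's expiry handling,
which is what a driver wants for commands that legitimately stay outstanding
indefinitely, such as an asynchronous event notification. A minimal sketch
with a hypothetical helper:

    static void mark_async_event_request(struct request *rq)
    {
            /* "requests may never expire": keep the timeout timer away */
            rq->cmd_flags |= REQ_NO_TIMEOUT;
    }
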
index a1c81f80978ee4b38bbbb1cdd781c7afa0362c5e..33063f872ee3cd698a233a0f0defa67aa1b6bd01 100644 (file)
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
        }
 }
 
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  * compiler is aware of some particular ordering.  One way to make the
  * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  *
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE()  will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
  * compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 #define READ_ONCE(x) \
        ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
 
-#define ASSIGN_ONCE(val, x) \
-       ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+       ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
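
Note that the argument order changed along with the name: ASSIGN_ONCE(val, x)
stored val into x, while WRITE_ONCE(x, val) puts the destination first,
matching READ_ONCE() and ordinary assignment. Typical pairing, as a sketch
against a hypothetical shared structure:

    /* writer: a store the compiler may not tear, merge or reorder away */
    WRITE_ONCE(shared->seq, new_seq);

    /* reader: a fresh, untorn load on every loop iteration */
    while (READ_ONCE(shared->seq) == old_seq)
            cpu_relax();
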
 
index 375af80bde7d7c90bb1c09efb3edc297dbe4d864..f767a0de611f8a726df957c6595d84df92d8efb3 100644 (file)
@@ -137,6 +137,7 @@ struct sdhci_host {
 #define SDHCI_SDR104_NEEDS_TUNING (1<<10)      /* SDR104/HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)     /* Host is using a retuning timer for the card */
 #define SDHCI_USE_64_BIT_DMA   (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING     (1<<13) /* Tuning for HS400 */
 
        unsigned int version;   /* SDHCI spec. version */
 
index 679e6e90aa4c2b1a2e9ea9c3fde52e12ea389b91..52fd8e8694cfade5e844d52a70b191c3183f60a2 100644 (file)
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     3. Update dev->stats asynchronously and atomically, and define
  *        neither operation.
  *
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *     If device support VLAN filtering this function is called when a
  *     VLAN id is registered.
  *
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *     If device support VLAN filtering this function is called when a
  *     VLAN id is unregistered.
  *
@@ -2085,7 +2085,7 @@ extern rwlock_t                           dev_base_lock;          /* Device list lock */
        list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_in_bond_rcu(bond, slave)       \
                for_each_netdev_rcu(&init_net, slave)   \
-                       if (netdev_master_upper_dev_get_rcu(slave) == bond)
+                       if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 #define net_device_entry(lh)   list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
index 1e37fbb78f7afbc57b8ab3c076d66ce7555f3cb0..ddea982355f3be93947dae0b2e30f9e00209920e 100644 (file)
@@ -74,6 +74,9 @@ struct nfs_client {
        /* idmapper */
        struct idmap *          cl_idmap;
 
+       /* Client owner identifier */
+       const char *            cl_owner_id;
+
        /* Our own IP address, as a null-terminated string.
         * This is used to generate the mv0 callback address.
         */
index 430cfaf92285f177d977d11717599bf1ff85b70b..db81c65b8f4857c011a025e5b54026bf7d683f7c 100644 (file)
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
 int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 int    se_dev_set_queue_depth(struct se_device *, u32);
 int    se_dev_set_max_sectors(struct se_device *, u32);
-int    se_dev_set_fabric_max_sectors(struct se_device *, u32);
 int    se_dev_set_optimal_sectors(struct se_device *, u32);
 int    se_dev_set_block_size(struct se_device *, u32);
 
index 3247d7530107968aa7c1d1957f96790c226845dd..186f7a92357094fbb4735043b2a2b8c8cd884af8 100644 (file)
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
        TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR);           \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors);                 \
        TB_DEV_ATTR_RO(_backend, hw_max_sectors);                       \
-       DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors);                \
-       TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR);   \
        DEF_TB_DEV_ATTRIB(_backend, optimal_sectors);                   \
        TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR);      \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth);                 \
index 397fb635766a96faa94c5b91788ad24fca0d2a34..4a8795a87b9e99f30ee07f43fdce3984ddc658e0 100644 (file)
@@ -77,8 +77,6 @@
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
 /* Default max_write_same_len, disabled by default */
 #define DA_MAX_WRITE_SAME_LEN                  0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS                  8192
 /* Use a model alias based on the configfs backend device name */
 #define DA_EMULATE_MODEL_ALIAS                 0
 /* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
        u32             hw_block_size;
        u32             block_size;
        u32             hw_max_sectors;
-       u32             fabric_max_sectors;
        u32             optimal_sectors;
        u32             hw_queue_depth;
        u32             queue_depth;
index 3a6dcaa359b768d09bfc58f71c0f9b23582d9f24..f714e863335204a4c3e29525e41605f3a9e0f0f4 100644 (file)
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
        OVS_PACKET_ATTR_USERDATA,    /* OVS_ACTION_ATTR_USERSPACE arg. */
        OVS_PACKET_ATTR_EGRESS_TUN_KEY,  /* Nested OVS_TUNNEL_KEY_ATTR_*
                                            attributes. */
+       OVS_PACKET_ATTR_UNUSED1,
+       OVS_PACKET_ATTR_UNUSED2,
+       OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
+                                      error logging should be suppressed. */
        __OVS_PACKET_ATTR_MAX
 };
 
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644 (file)
index 0000000..b47d9d0
--- /dev/null
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error     0
+#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr     1
+#define XEN_NMIREASON_pci_serr      (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown      2
+#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback   0
+struct xennmi_callback {
+    unsigned long handler_address;
+    unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
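
This header is the guest-side mirror of Xen's public NMI interface: a PV
kernel (in practice dom0, vcpu0) registers an entry point, then inspects the
XEN_NMIREASON_* bits in arch_shared_info.nmi_reason when it fires. A hedged
sketch, assuming the usual HYPERVISOR_nmi_op() hypercall wrapper and a
hypothetical low-level stub my_nmi_entry:

    #include <xen/interface/nmi.h>

    extern void my_nmi_entry(void);     /* hypothetical asm entry stub */

    static int register_guest_nmi_callback(void)
    {
            struct xennmi_callback cb = {
                    .handler_address = (unsigned long)my_nmi_entry,
                    .pad             = 0,
            };

            /* zero on success; anything but dom0/vcpu0 gets EINVAL back */
            return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
    }
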
index 929a733d302e0d438d2f15f8dfaf6e2e4c56c0d0..224e768bdc738da7c47aca41fcc6d9ecd4c190b4 100644 (file)
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
-                                  struct ftrace_hash *old_hash)
+                                  struct ftrace_ops_hash *old_hash)
 {
        ops->flags |= FTRACE_OPS_FL_MODIFYING;
-       ops->old_hash.filter_hash = old_hash;
+       ops->old_hash.filter_hash = old_hash->filter_hash;
+       ops->old_hash.notrace_hash = old_hash->notrace_hash;
        ftrace_run_update_code(command);
        ops->old_hash.filter_hash = NULL;
+       ops->old_hash.notrace_hash = NULL;
        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
 {
        int ret;
        int i;
@@ -3637,6 +3639,7 @@ int
 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_probe *entry;
        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
+       old_hash_ops.filter_hash = old_hash;
+       /* Probes only have filters */
+       old_hash_ops.notrace_hash = NULL;
+
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
        if (!hash) {
                count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
 
-       __enable_ftrace_function_probe(old_hash);
+       __enable_ftrace_function_probe(&old_hash_ops);
 
        if (!ret)
                free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 }
 
 static void ftrace_ops_update_code(struct ftrace_ops *ops,
-                                  struct ftrace_hash *old_hash)
+                                  struct ftrace_ops_hash *old_hash)
 {
-       if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+       struct ftrace_ops *op;
+
+       if (!ftrace_enabled)
+               return;
+
+       if (ops->flags & FTRACE_OPS_FL_ENABLED) {
                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+               return;
+       }
+
+       /*
+        * If this is the shared global_ops filter, then we need to
+        * check whether another ops that shares it is enabled.
+        * If so, we still need to run the modify code.
+        */
+       if (ops->func_hash != &global_ops.local_hash)
+               return;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->func_hash == &global_ops.local_hash &&
+                   op->flags & FTRACE_OPS_FL_ENABLED) {
+                       ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+                       /* Only need to do this once */
+                       return;
+               }
+       } while_for_each_ftrace_op(op);
 }
 
 static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
                unsigned long ip, int remove, int reset, int enable)
 {
        struct ftrace_hash **orig_hash;
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *hash;
        int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
        mutex_lock(&ftrace_lock);
        old_hash = *orig_hash;
+       old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+       old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
        if (!ret) {
-               ftrace_ops_update_code(ops, old_hash);
+               ftrace_ops_update_code(ops, &old_hash_ops);
                free_ftrace_hash_rcu(old_hash);
        }
        mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
 int ftrace_regex_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_iterator *iter;
        struct ftrace_hash **orig_hash;
        struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                mutex_lock(&ftrace_lock);
                old_hash = *orig_hash;
+               old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+               old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
                ret = ftrace_hash_move(iter->ops, filter_hash,
                                       orig_hash, iter->hash);
                if (!ret) {
-                       ftrace_ops_update_code(iter->ops, old_hash);
+                       ftrace_ops_update_code(iter->ops, &old_hash_ops);
                        free_ftrace_hash_rcu(old_hash);
                }
                mutex_unlock(&ftrace_lock);
index 2e767972e99c2e8791236d10afa717b920543b70..4a9079b9f082fd3bb14e3b46522b1540b001fea1 100644 (file)
@@ -6918,7 +6918,6 @@ void __init trace_init(void)
                        tracepoint_printk = 0;
        }
        tracer_alloc_buffers();
-       init_ftrace_syscalls();
        trace_event_init();     
 }
 
index 366a78a3e61e21a94c06aa96f5b5c02a72c41e78..b03a0ea77b993cf9f175ed7b44fc239832de7def 100644 (file)
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
        return 0;
 }
 
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+       char *buf = bootup_event_buf;
+       char *token;
+       int ret;
+
+       while (true) {
+               token = strsep(&buf, ",");
+
+               if (!token)
+                       break;
+               if (!*token)
+                       continue;
+
+               /* Restarting syscalls requires that we stop them first */
+               if (disable_first)
+                       ftrace_set_clr_event(tr, token, 0);
+
+               ret = ftrace_set_clr_event(tr, token, 1);
+               if (ret)
+                       pr_warn("Failed to enable trace event: %s\n", token);
+
+               /* Put back the comma to allow this to be called again */
+               if (buf)
+                       *(buf - 1) = ',';
+       }
+}
+
 static __init int event_trace_enable(void)
 {
        struct trace_array *tr = top_trace_array();
        struct ftrace_event_call **iter, *call;
-       char *buf = bootup_event_buf;
-       char *token;
        int ret;
 
        if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
         */
        __trace_early_add_events(tr);
 
-       while (true) {
-               token = strsep(&buf, ",");
-
-               if (!token)
-                       break;
-               if (!*token)
-                       continue;
-
-               ret = ftrace_set_clr_event(tr, token, 1);
-               if (ret)
-                       pr_warn("Failed to enable trace event: %s\n", token);
-       }
+       early_enable_events(tr, false);
 
        trace_printk_start_comm();
 
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
        return 0;
 }
 
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, so the flag is never set, the syscall
+ * tracepoints are never reached, and the event is enabled regardless
+ * (without actually doing anything).
+ */
+static __init int event_trace_enable_again(void)
+{
+       struct trace_array *tr;
+
+       tr = top_trace_array();
+       if (!tr)
+               return -ENODEV;
+
+       early_enable_events(tr, true);
+
+       return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
 static __init int event_trace_init(void)
 {
        struct trace_array *tr;
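
The "*(buf - 1) = ','" line is what makes early_enable_events() re-runnable:
strsep() NUL-terminates each token in place, so without restoring the
separators the second pass (from event_trace_enable_again()) would see only
the first token. The trick in isolation, as a standalone sketch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>

    static void walk(char *buf)
    {
            char *token;

            while ((token = strsep(&buf, ",")) != NULL) {
                    if (*token)
                            printf("token: %s\n", token);
                    if (buf)
                            *(buf - 1) = ',';   /* undo strsep's '\0' */
            }
    }

    int main(void)
    {
            char events[] = "sched:sched_switch,irq:irq_handler_entry";
            walk(events);   /* first pass */
            walk(events);   /* second pass works only thanks to the restore */
            return 0;
    }
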
index c6565f00fb38bdc89e0c0ae061786af4bc9c1667..54f3a9b0095600749fda793d7e6067bd2c7f997d 100644 (file)
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
+       if (!tlb->end)
+               return;
+
        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch;
 
-       for (batch = &tlb->local; batch; batch = batch->next) {
+       for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-       if (!tlb->end)
-               return;
-
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
 }
index 1f1de715197c19ab12f5333e9a518a317754f041..e2aa7be3a847f448a404e0a43f6d1a09f1a0517a 100644 (file)
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
        dst = NULL;
 
        if (is_broadcast_ether_addr(dest)) {
-               if (p->flags & BR_PROXYARP &&
+               if (IS_ENABLED(CONFIG_INET) &&
+                   p->flags & BR_PROXYARP &&
                    skb->protocol == htons(ETH_P_ARP))
                        br_do_proxy_arp(skb, br, vid);
 
index 8e38f17288d3c5a475471b0e56e339d9b6d5bf9e..8d614c93f86a233a5cb3864c600d9c68fe5f9ea1 100644 (file)
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
                        case NDTPA_BASE_REACHABLE_TIME:
                                NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
                                              nla_get_msecs(tbp[i]));
+                               /* update reachable_time as well, otherwise, the change will
+                                * only be effective after the next time neigh_periodic_work
+                                * decides to recompute it (can be multiple minutes)
+                                */
+                               p->reachable_time =
+                                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
                                break;
                        case NDTPA_GC_STALETIME:
                                NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+                                         void __user *buffer,
+                                         size_t *lenp, loff_t *ppos)
+{
+       struct neigh_parms *p = ctl->extra2;
+       int ret;
+
+       if (strcmp(ctl->procname, "base_reachable_time") == 0)
+               ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+       else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+               ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+       else
+               ret = -1;
+
+       if (write && ret == 0) {
+               /* update reachable_time as well, otherwise, the change will
+                * only be effective after the next time neigh_periodic_work
+                * decides to recompute it
+                */
+               p->reachable_time =
+                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+       }
+       return ret;
+}
+
 #define NEIGH_PARMS_DATA_OFFSET(index) \
        (&((struct neigh_parms *) 0)->data[index])
 
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+       } else {
+               /* Those handlers will update p->reachable_time after
+                * base_reachable_time(_ms) is set to ensure the new timer starts being
+                * applied after the next neighbour update instead of waiting for
+                * neigh_periodic_work to update its value (can be multiple minutes).
+                * So any handler that replaces them should do this as well
+                */
+               /* ReachableTime */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+                       neigh_proc_base_reachable_time;
+               /* ReachableTime (in milliseconds) */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+                       neigh_proc_base_reachable_time;
        }
 
        /* Don't export sysctls to unprivileged users */
index ff2d23d8c87a16964e29f478640dccd5ca527a02..6ecfce63201a2753d4943813d3eccbb951f34d0b 100644 (file)
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 
        memset(&mr, 0, sizeof(mr));
        if (priv->sreg_proto_min) {
-               mr.range[0].min.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               mr.range[0].max.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               mr.range[0].min.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               mr.range[0].max.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
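
This cast change (repeated below for nft_redir_ipv6 and again for nft_nat) is
a big-endian correctness fix: the nft data register is a u32 whose first two
bytes carry the network-order port, so truncating the register's value to 16
bits only happens to pick up those bytes on little-endian hosts. Reading the
first two bytes directly works everywhere. A worked sketch, assuming port
8080 (0x1f90) was loaded into the register in network order:

    u32 reg;
    memcpy(&reg, "\x1f\x90\x00\x00", 4);    /* bytes 0..1 = htons(8080) */

    __be16 bad  = (__force __be16)reg;      /* low 16 bits of the value:
                                             * 0x901f on LE (right by luck),
                                             * 0x0000 on BE (wrong)        */
    __be16 good = *(__be16 *)&reg;          /* bytes 0..1 on any host      */
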
 
index 2433a6bfb191c4259bedfe2d697baa618c818b51..11820b6b36130d4ec83af3f173a1ca70df8cbf93 100644 (file)
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 
        memset(&range, 0, sizeof(range));
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 1d5341f3761dfe1e57cc6505493bf270cca672de..5d3daae98bf0be1bc0fa699ec96208d2fa3da1d4 100644 (file)
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct nf_conn *ct;
        struct net *net;
 
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct ip_vs_conn *n_cp;
        struct net *net;
 
+       /* no diff required for incoming packets */
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       /* no diff required for incoming packets */
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
index a11674806707e18fb9d86860ad0717dfda0b0da2..46d1b26a468ed0d4cd0c9f30c02398f51c922db0 100644 (file)
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
-       /* We have to check the DYING flag inside the lock to prevent
-          a race against nf_ct_get_next_corpse() possibly called from
-          user context, else we insert an already 'dead' hash, blocking
-          further use of that particular connection -JM */
+       /* We have to check the DYING flag after unlink to prevent
+        * a race against nf_ct_get_next_corpse() possibly called from
+        * user context, else we insert an already 'dead' hash, blocking
+        * further use of that particular connection -JM.
+        */
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
-       if (unlikely(nf_ct_is_dying(ct))) {
-               nf_conntrack_double_unlock(hash, reply_hash);
-               local_bh_enable();
-               return NF_ACCEPT;
-       }
+       if (unlikely(nf_ct_is_dying(ct)))
+               goto out;
 
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
-       nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        return NF_ACCEPT;
 
 out:
+       nf_ct_add_to_dying_list(ct);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
index 129a8daa4abf31959801e99c4f2fbfc7f1aab230..3b3ddb4fb9ee122a5b6d3a39450be38a64d6f614 100644 (file)
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
        struct nft_chain *chain, *nc;
        struct nft_set *set, *ns;
 
-       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+       list_for_each_entry(chain, &ctx->table->chains, list) {
                ctx->chain = chain;
 
                err = nft_delrule_by_chain(ctx);
                if (err < 0)
                        goto out;
-
-               err = nft_delchain(ctx);
-               if (err < 0)
-                       goto out;
        }
 
        list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
                        goto out;
        }
 
+       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+               ctx->chain = chain;
+
+               err = nft_delchain(ctx);
+               if (err < 0)
+                       goto out;
+       }
+
        err = nft_deltable(ctx);
 out:
        return err;
index cde4a6702fa3199421d6ced324e2bb74fdaa46ab..c421d94c4652625147ba65aebe4c251561cf37ff 100644 (file)
@@ -321,7 +321,8 @@ replay:
                nlh = nlmsg_hdr(skb);
                err = 0;
 
-               if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+               if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+                   skb->len < nlh->nlmsg_len) {
                        err = -EINVAL;
                        goto ack;
                }
@@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group)
        int type;
 
        if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
-               return -EINVAL;
+               return 0;
 
        type = nfnl_group2type[group];
 
index afe2b0b45ec41f82df6f2430958a4392aa5eb608..aff54fb1c8a09fdb99fd4caab0959a80f28ffc37 100644 (file)
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
        }
 
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 4e9a5f035cbcf144998ae7c22c90151429cf7031..b07349e82d788dba64e768cf3ea1da77b08a9652 100644 (file)
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        struct vport *input_vport;
        int len;
        int err;
-       bool log = !a[OVS_FLOW_ATTR_PROBE];
+       bool log = !a[OVS_PACKET_ATTR_PROBE];
 
        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
index 6880f34a529a56510b1145e14d41ed04803dd6a7..9cfe2e1dd8b5099bbac7d824241bd0d06cb01cf7 100644 (file)
@@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = -EINVAL;
        if (sock->type == SOCK_DGRAM) {
                offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
-               if (unlikely(offset) < 0)
+               if (unlikely(offset < 0))
                        goto out_free;
        } else {
                if (ll_header_truncated(dev, len))
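
The one-character move of the parenthesis is a real bug fix, not style:
unlikely(x) expands to __builtin_expect(!!(x), 0), which evaluates to 0 or 1,
so "unlikely(offset) < 0" was always false and a failing dev_hard_header()
went unnoticed. In isolation, with a hypothetical error path:

    int offset = -EINVAL;

    if (unlikely(offset) < 0)   /* !!(offset) == 1; 1 < 0 never holds */
            handle_error();     /* unreachable */

    if (unlikely(offset < 0))   /* the hint wraps the whole comparison */
            handle_error();     /* taken, as intended */
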
index 96ceefeb9daf4eb780cd168b4d008418c0055dd9..a9e174fc0f91fd672eb3779f23fff9ef7decdb74 100644 (file)
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
        struct sk_buff *skb;
 
        skb_queue_walk(&bcl->outqueue, skb) {
-               if (more(buf_seqno(skb), after))
+               if (more(buf_seqno(skb), after)) {
+                       tipc_link_retransmit(bcl, skb, mod(to - after));
                        break;
+               }
        }
-       tipc_link_retransmit(bcl, skb, mod(to - after));
 }
 
 /**
index d273624c93a642544f8ba2b31d45cebbc505dc52..e238c9559caf9a7757d2d389e0bda57cd73229a8 100644 (file)
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
 }
 
 static int check_execveat_invoked_rc(int fd, const char *path, int flags,
-                                    int expected_rc)
+                                    int expected_rc, int expected_rc2)
 {
        int status;
        int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
                        child, status);
                return 1;
        }
-       if (WEXITSTATUS(status) != expected_rc) {
-               printf("[FAIL] (child %d exited with %d not %d)\n",
-                       child, WEXITSTATUS(status), expected_rc);
+       if ((WEXITSTATUS(status) != expected_rc) &&
+           (WEXITSTATUS(status) != expected_rc2)) {
+               printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
+                       child, WEXITSTATUS(status), expected_rc, expected_rc2);
                return 1;
        }
        printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
 
 static int check_execveat(int fd, const char *path, int flags)
 {
-       return check_execveat_invoked_rc(fd, path, flags, 99);
+       return check_execveat_invoked_rc(fd, path, flags, 99, 99);
 }
 
 static char *concat(const char *left, const char *right)
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
         * Execute as a long pathname relative to ".".  If this is a script,
         * the interpreter will launch but fail to open the script because its
         * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+        *
+        * The failure code is usually 127 (POSIX: "If a command is not found,
+        * the exit status shall be 127."), but some systems give 126 (POSIX:
+        * "If the command name is found, but it is not an executable utility,
+        * the exit status shall be 126."), so allow either.
         */
        if (is_script)
-               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+                                                 127, 126);
        else
                fail += check_execveat(dot_dfd, longpath, 0);
 
index 94dae65eea4183b43bf3e52bf9bf22dc9c0d4471..8519e9ee97e3d3e4a344e798cabf5b38edfff602 100644 (file)
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
 {
        struct mq_attr attr;
        char *option, *next_option;
-       int i, cpu;
+       int i, cpu, rc;
        struct sigaction sa;
        poptContext popt_context;
-       char rc;
        void *retval;
 
        main_thread = pthread_self();
index 4c4b1f631ecf61f6e3048d746c23be39ea4f2ef9..077828c889f1377886b98c93349919d55ccb57a2 100644 (file)
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
 
 all: $(BINARIES)
 %: %.c
-       $(CC) $(CFLAGS) -o $@ $^
+       $(CC) $(CFLAGS) -o $@ $^ -lrt
 
 run_tests: all
        @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)