Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 21 Dec 2018 22:21:17 +0000 (14:21 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 21 Dec 2018 22:21:17 +0000 (14:21 -0800)
Pull more networking fixes from David Miller:
 "Some more bug fixes have trickled in, we have:

  1) Lock local MAC entries properly in mscc driver, from Allan W. Nielsen.

  2) Eric Dumazet found some more of the typical "pskb_may_pull() -->
     oops forgot to reload the header pointer" bugs in ipv6 tunnel
     handling.

  3) Bad SKB socket pointer in ipv6 fragmentation handling, from Herbert
     Xu.

  4) Overflow fix in sk_msg_clone(), from Vakul Garg.

  5) Validate address lengths in AF_PACKET, from Willem de Bruijn"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  qmi_wwan: Fix qmap header retrieval in qmimux_rx_fixup
  qmi_wwan: Add support for Fibocom NL678 series
  tls: Do not call sk_memcopy_from_iter with zero length
  ipv6: tunnels: fix two use-after-free
  Prevent overflow of sk_msg in sk_msg_clone()
  packet: validate address length
  net: netxen: fix a missing check and an uninitialized use
  tcp: fix a race in inet_diag_dump_icsk()
  MAINTAINERS: update cxgb4 and cxgb3 maintainer
  ipv6: frags: Fix bogus skb->sk in reassembled packets
  mscc: Configured MAC entries should be locked.

49 files changed:
MAINTAINERS
Makefile
arch/x86/Makefile
arch/x86/entry/calling.h
arch/x86/entry/vdso/Makefile
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/fsgsbase.h
arch/x86/include/asm/jump_label.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/refcount.h
arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
arch/x86/kernel/cpu/mtrr/if.c
arch/x86/kernel/macros.S [deleted file]
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kvm/svm.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/xen/mmu_pv.c
drivers/gpio/gpio-max7301.c
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/drm_ioctl.c
drivers/i2c/busses/i2c-nvidia-gpu.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/kapi.c
fs/cifs/smb2inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2proto.h
fs/ubifs/Kconfig
fs/ubifs/lpt.c
fs/ubifs/replay.c
fs/ubifs/sb.c
include/asm-generic/bug.h
include/linux/compiler.h
kernel/futex.c
kernel/time/posix-timers.c
scripts/Kbuild.include
scripts/mod/Makefile

index 52e664b5a68d5d07b57d87d8c7db479061129e82..f3a5c97e34196680db3be6e92db9386155a33b7d 100644 (file)
@@ -2633,6 +2633,13 @@ S:       Maintained
 F:     Documentation/devicetree/bindings/sound/axentia,*
 F:     sound/soc/atmel/tse850-pcm5142.c
 
+AXXIA I2C CONTROLLER
+M:     Krzysztof Adamski <krzysztof.adamski@nokia.com>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/i2c/i2c-axxia.txt
+F:     drivers/i2c/busses/i2c-axxia.c
+
 AZ6007 DVB DRIVER
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
index d45856f80057d61e66fed31e48ca858c08b3d88d..b183f68d17fe5527de0f5c993b44057dd91b6d6d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1076,7 +1076,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
 # version.h and scripts_basic is processed / created.
 
 # Listed in dependency order
-PHONY += prepare archprepare macroprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
 
 # prepare3 is used to check if we are building in a separate output directory,
 # and if so do:
@@ -1099,9 +1099,7 @@ prepare2: prepare3 outputmakefile asm-generic
 prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
        $(cmd_crmodverdir)
 
-macroprepare: prepare1 archmacros
-
-archprepare: archheaders archscripts macroprepare scripts_basic
+archprepare: archheaders archscripts prepare1 scripts_basic
 
 prepare0: archprepare gcc-plugins
        $(Q)$(MAKE) $(build)=.
@@ -1177,9 +1175,6 @@ archheaders:
 PHONY += archscripts
 archscripts:
 
-PHONY += archmacros
-archmacros:
-
 PHONY += __headers
 __headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
        $(Q)$(MAKE) $(build)=scripts build_unifdef
index 75ef499a66e2b81c82fb6abb9bb4bd9a64521e73..85a66c4a8b652b2568473a3384785364495846ab 100644 (file)
@@ -232,13 +232,6 @@ archscripts: scripts_basic
 archheaders:
        $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
-archmacros:
-       $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
-
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
-export ASM_MACRO_FLAGS
-KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
-
 ###
 # Kernel objects
 
index 25e5a6bda8c3a971609dff93919ccab27d6a3aa9..20d0885b00fbec4c77dfee23c701ba0c3612890b 100644 (file)
@@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro CALL_enter_from_user_mode
 #ifdef CONFIG_CONTEXT_TRACKING
 #ifdef HAVE_JUMP_LABEL
-       STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
+       STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
 #endif
        call enter_from_user_mode
 .Lafter_call_\@:
index 0624bf2266fd76d2852ce005acb2f9d67dbe6b8f..5bfe2243a08f882c4ab622cd87799ac1a28ff3c2 100644 (file)
@@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO    $@
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
-       $(call ld-option, --build-id) -Bsymbolic
+       $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+       -Bsymbolic
 GCOV_PROFILE := n
 
 #
index 8e4ea39e55d071de447bb1c1336dce2ec314ad5e..31b627b43a8e01933d6209e746f4c08912d0cdef 100644 (file)
@@ -7,24 +7,16 @@
 #include <asm/asm.h>
 
 #ifdef CONFIG_SMP
-.macro LOCK_PREFIX_HERE
+       .macro LOCK_PREFIX
+672:   lock
        .pushsection .smp_locks,"a"
        .balign 4
-       .long 671f - .          # offset
+       .long 672b - .
        .popsection
-671:
-.endm
-
-.macro LOCK_PREFIX insn:vararg
-       LOCK_PREFIX_HERE
-       lock \insn
-.endm
+       .endm
 #else
-.macro LOCK_PREFIX_HERE
-.endm
-
-.macro LOCK_PREFIX insn:vararg
-.endm
+       .macro LOCK_PREFIX
+       .endm
 #endif
 
 /*
index d7faa16622d81d39ff11d703fad17ed7931cce86..4cd6a3b71824293ae3edb664bc5ed6e48ca5a459 100644 (file)
  */
 
 #ifdef CONFIG_SMP
-#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t"
-#define LOCK_PREFIX "LOCK_PREFIX "
+#define LOCK_PREFIX_HERE \
+               ".pushsection .smp_locks,\"a\"\n"       \
+               ".balign 4\n"                           \
+               ".long 671f - .\n" /* offset */         \
+               ".popsection\n"                         \
+               "671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
+
 #else /* ! CONFIG_SMP */
 #define LOCK_PREFIX_HERE ""
 #define LOCK_PREFIX ""
index 21b086786404baff684ef450bd13aad5abd181ec..6467757bb39f6b6622c0121fe40f9f6fbcfd0b39 100644 (file)
 /* Exception table entry */
 #ifdef __ASSEMBLY__
 # define _ASM_EXTABLE_HANDLE(from, to, handler)                        \
-       ASM_EXTABLE_HANDLE from to handler
-
-.macro ASM_EXTABLE_HANDLE from:req to:req handler:req
-       .pushsection "__ex_table","a"
-       .balign 4
-       .long (\from) - .
-       .long (\to) - .
-       .long (\handler) - .
+       .pushsection "__ex_table","a" ;                         \
+       .balign 4 ;                                             \
+       .long (from) - . ;                                      \
+       .long (to) - . ;                                        \
+       .long (handler) - . ;                                   \
        .popsection
-.endm
-#else /* __ASSEMBLY__ */
-
-# define _ASM_EXTABLE_HANDLE(from, to, handler)                        \
-       "ASM_EXTABLE_HANDLE from=" #from " to=" #to             \
-       " handler=\"" #handler "\"\n\t"
-
-/* For C file, we already have NOKPROBE_SYMBOL macro */
-
-#endif /* __ASSEMBLY__ */
 
 # define _ASM_EXTABLE(from, to)                                        \
        _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
        _ASM_PTR (entry);                                       \
        .popsection
 
-#ifdef __ASSEMBLY__
 .macro ALIGN_DESTINATION
        /* check for bad alignment of destination */
        movl %edi,%ecx
        _ASM_EXTABLE_UA(100b, 103b)
        _ASM_EXTABLE_UA(101b, 103b)
        .endm
-#endif /* __ASSEMBLY__ */
+
+#else
+# define _EXPAND_EXTABLE_HANDLE(x) #x
+# define _ASM_EXTABLE_HANDLE(from, to, handler)                        \
+       " .pushsection \"__ex_table\",\"a\"\n"                  \
+       " .balign 4\n"                                          \
+       " .long (" #from ") - .\n"                              \
+       " .long (" #to ") - .\n"                                \
+       " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n"    \
+       " .popsection\n"
+
+# define _ASM_EXTABLE(from, to)                                        \
+       _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+
+# define _ASM_EXTABLE_UA(from, to)                             \
+       _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
+# define _ASM_EXTABLE_FAULT(from, to)                          \
+       _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+
+# define _ASM_EXTABLE_EX(from, to)                             \
+       _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
+
+# define _ASM_EXTABLE_REFCOUNT(from, to)                       \
+       _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
+
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+#endif
 
 #ifndef __ASSEMBLY__
 /*
index 5090035e6d160fed62820f4eb40a363bb6dff763..6804d66427673ec314659944e65052b5dfba273e 100644 (file)
@@ -4,8 +4,6 @@
 
 #include <linux/stringify.h>
 
-#ifndef __ASSEMBLY__
-
 /*
  * Despite that some emulators terminate on UD2, we use it for WARN().
  *
 
 #define LEN_UD2                2
 
+#ifdef CONFIG_GENERIC_BUG
+
+#ifdef CONFIG_X86_32
+# define __BUG_REL(val)        ".long " __stringify(val)
+#else
+# define __BUG_REL(val)        ".long " __stringify(val) " - 2b"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_FLAGS(ins, flags)                                         \
+do {                                                                   \
+       asm volatile("1:\t" ins "\n"                                    \
+                    ".pushsection __bug_table,\"aw\"\n"                \
+                    "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
+                    "\t"  __BUG_REL(%c0) "\t# bug_entry::file\n"       \
+                    "\t.word %c1"        "\t# bug_entry::line\n"       \
+                    "\t.word %c2"        "\t# bug_entry::flags\n"      \
+                    "\t.org 2b+%c3\n"                                  \
+                    ".popsection"                                      \
+                    : : "i" (__FILE__), "i" (__LINE__),                \
+                        "i" (flags),                                   \
+                        "i" (sizeof(struct bug_entry)));               \
+} while (0)
+
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
 #define _BUG_FLAGS(ins, flags)                                         \
 do {                                                                   \
-       asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 "       \
-                    "flags=%c2 size=%c3"                               \
-                    : : "i" (__FILE__), "i" (__LINE__),                \
-                        "i" (flags),                                   \
+       asm volatile("1:\t" ins "\n"                                    \
+                    ".pushsection __bug_table,\"aw\"\n"                \
+                    "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
+                    "\t.word %c0"        "\t# bug_entry::flags\n"      \
+                    "\t.org 2b+%c1\n"                                  \
+                    ".popsection"                                      \
+                    : : "i" (flags),                                   \
                         "i" (sizeof(struct bug_entry)));               \
 } while (0)
 
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#else
+
+#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+
+#endif /* CONFIG_GENERIC_BUG */
+
 #define HAVE_ARCH_BUG
 #define BUG()                                                  \
 do {                                                           \
@@ -46,54 +82,4 @@ do {                                                         \
 
 #include <asm-generic/bug.h>
 
-#else /* __ASSEMBLY__ */
-
-#ifdef CONFIG_GENERIC_BUG
-
-#ifdef CONFIG_X86_32
-.macro __BUG_REL val:req
-       .long \val
-.endm
-#else
-.macro __BUG_REL val:req
-       .long \val - 2b
-.endm
-#endif
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1:     \ins
-       .pushsection __bug_table,"aw"
-2:     __BUG_REL val=1b        # bug_entry::bug_addr
-       __BUG_REL val=\file     # bug_entry::file
-       .word \line             # bug_entry::line
-       .word \flags            # bug_entry::flags
-       .org 2b+\size
-       .popsection
-.endm
-
-#else /* !CONFIG_DEBUG_BUGVERBOSE */
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1:     \ins
-       .pushsection __bug_table,"aw"
-2:     __BUG_REL val=1b        # bug_entry::bug_addr
-       .word \flags            # bug_entry::flags
-       .org 2b+\size
-       .popsection
-.endm
-
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
-
-#else /* CONFIG_GENERIC_BUG */
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-       \ins
-.endm
-
-#endif /* CONFIG_GENERIC_BUG */
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* _ASM_X86_BUG_H */
index 7d442722ef241b684c348dc47b6be916d4728a3d..aced6c9290d6f96cdaf4eaadab3dd3835d80b94a 100644 (file)
@@ -2,10 +2,10 @@
 #ifndef _ASM_X86_CPUFEATURE_H
 #define _ASM_X86_CPUFEATURE_H
 
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
 #include <asm/processor.h>
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
 #include <asm/asm.h>
 #include <linux/bitops.h>
 
@@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-       asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
-                         "cap_byte=\"%[cap_byte]\" "
-                         "feature=%P[feature] t_yes=%l[t_yes] "
-                         "t_no=%l[t_no] always=%P[always]"
+       asm_volatile_goto("1: jmp 6f\n"
+                "2:\n"
+                ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+                        "((5f-4f) - (2b-1b)),0x90\n"
+                "3:\n"
+                ".section .altinstructions,\"a\"\n"
+                " .long 1b - .\n"              /* src offset */
+                " .long 4f - .\n"              /* repl offset */
+                " .word %P[always]\n"          /* always replace */
+                " .byte 3b - 1b\n"             /* src len */
+                " .byte 5f - 4f\n"             /* repl len */
+                " .byte 3b - 2b\n"             /* pad len */
+                ".previous\n"
+                ".section .altinstr_replacement,\"ax\"\n"
+                "4: jmp %l[t_no]\n"
+                "5:\n"
+                ".previous\n"
+                ".section .altinstructions,\"a\"\n"
+                " .long 1b - .\n"              /* src offset */
+                " .long 0\n"                   /* no replacement */
+                " .word %P[feature]\n"         /* feature bit */
+                " .byte 3b - 1b\n"             /* src len */
+                " .byte 0\n"                   /* repl len */
+                " .byte 0\n"                   /* pad len */
+                ".previous\n"
+                ".section .altinstr_aux,\"ax\"\n"
+                "6:\n"
+                " testb %[bitnum],%[cap_byte]\n"
+                " jnz %l[t_yes]\n"
+                " jmp %l[t_no]\n"
+                ".previous\n"
                 : : [feature]  "i" (bit),
                     [always]   "i" (X86_FEATURE_ALWAYS),
                     [bitnum]   "i" (1 << (bit & 7)),
@@ -199,44 +226,5 @@ t_no:
 #define CPU_FEATURE_TYPEVAL            boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
                                        boot_cpu_data.x86_model
 
-#else /* __ASSEMBLY__ */
-
-.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req
-1:
-       jmp 6f
-2:
-       .skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90
-3:
-       .section .altinstructions,"a"
-       .long 1b - .            /* src offset */
-       .long 4f - .            /* repl offset */
-       .word \always           /* always replace */
-       .byte 3b - 1b           /* src len */
-       .byte 5f - 4f           /* repl len */
-       .byte 3b - 2b           /* pad len */
-       .previous
-       .section .altinstr_replacement,"ax"
-4:
-       jmp \t_no
-5:
-       .previous
-       .section .altinstructions,"a"
-       .long 1b - .            /* src offset */
-       .long 0                 /* no replacement */
-       .word \feature          /* feature bit */
-       .byte 3b - 1b           /* src len */
-       .byte 0                 /* repl len */
-       .byte 0                 /* pad len */
-       .previous
-       .section .altinstr_aux,"ax"
-6:
-       testb \bitnum,\cap_byte
-       jnz \t_yes
-       jmp \t_no
-       .previous
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 #endif /* _ASM_X86_CPUFEATURE_H */
index eb377b6e9eedee2e311bb13c1616e0ecbe7af350..bca4c743de77c6d80f21f1bb4aeb0e2a188d824b 100644 (file)
@@ -16,8 +16,8 @@
  */
 extern unsigned long x86_fsbase_read_task(struct task_struct *task);
 extern unsigned long x86_gsbase_read_task(struct task_struct *task);
-extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
-extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
+extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
+extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
 
 /* Helper functions for reading/writing FS/GS base */
 
@@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void)
        return gsbase;
 }
 
-extern void x86_fsbase_write_cpu(unsigned long fsbase);
-extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+static inline void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+       wrmsrl(MSR_FS_BASE, fsbase);
+}
+
+static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+       wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
 
 #endif /* CONFIG_X86_64 */
 
index a5fb34fe56a4bb31f78023ff3d258132ff93ee16..21efc9d07ed909adfc37b06188b331ea0e6f747d 100644 (file)
@@ -2,6 +2,19 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
 
+#ifndef HAVE_JUMP_LABEL
+/*
+ * For better or for worse, if jump labels (the gcc extension) are missing,
+ * then the entire static branch patching infrastructure is compiled out.
+ * If that happens, the code in here will malfunction.  Raise a compiler
+ * error instead.
+ *
+ * In theory, jump labels and the static branch patching infrastructure
+ * could be decoupled to fix this.
+ */
+#error asm/jump_label.h included on a non-jump-label kernel
+#endif
+
 #define JUMP_LABEL_NOP_SIZE 5
 
 #ifdef CONFIG_X86_64
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-                         "branch=\"%c1\""
-                       : :  "i" (key), "i" (branch) : : l_yes);
+       asm_volatile_goto("1:"
+               ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+               ".pushsection __jump_table,  \"aw\" \n\t"
+               _ASM_ALIGN "\n\t"
+               ".long 1b - ., %l[l_yes] - . \n\t"
+               _ASM_PTR "%c0 + %c1 - .\n\t"
+               ".popsection \n\t"
+               : :  "i" (key), "i" (branch) : : l_yes);
+
        return false;
 l_yes:
        return true;
@@ -30,8 +49,14 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-                         "branch=\"%c1\""
+       asm_volatile_goto("1:"
+               ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+               "2:\n\t"
+               ".pushsection __jump_table,  \"aw\" \n\t"
+               _ASM_ALIGN "\n\t"
+               ".long 1b - ., %l[l_yes] - . \n\t"
+               _ASM_PTR "%c0 + %c1 - .\n\t"
+               ".popsection \n\t"
                : :  "i" (key), "i" (branch) : : l_yes);
 
        return false;
@@ -41,26 +66,37 @@ l_yes:
 
 #else  /* __ASSEMBLY__ */
 
-.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
-.Lstatic_branch_nop_\@:
-       .byte STATIC_KEY_INIT_NOP
-.Lstatic_branch_no_after_\@:
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+       .if \def
+       /* Equivalent to "jmp.d32 \target" */
+       .byte           0xe9
+       .long           \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+       .else
+       .byte           STATIC_KEY_INIT_NOP
+       .endif
        .pushsection __jump_table, "aw"
        _ASM_ALIGN
-       .long           .Lstatic_branch_nop_\@ - ., \l_yes - .
-       _ASM_PTR        \key + \branch - .
+       .long           .Lstatic_jump_\@ - ., \target - .
+       _ASM_PTR        \key - .
        .popsection
 .endm
 
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
-       .byte 0xe9
-       .long \l_yes - .Lstatic_branch_jmp_after_\@
-.Lstatic_branch_jmp_after_\@:
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+       .if \def
+       .byte           STATIC_KEY_INIT_NOP
+       .else
+       /* Equivalent to "jmp.d32 \target" */
+       .byte           0xe9
+       .long           \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+       .endif
        .pushsection __jump_table, "aw"
        _ASM_ALIGN
-       .long           .Lstatic_branch_jmp_\@ - ., \l_yes - .
-       _ASM_PTR        \key + \branch - .
+       .long           .Lstatic_jump_\@ - ., \target - .
+       _ASM_PTR        \key + 1 - .
        .popsection
 .endm
 
index 26942ad63830407255afc9e6de77267056a97135..488c59686a733cc8ad627f7150a7d4aab72507b9 100644 (file)
@@ -348,11 +348,23 @@ extern struct paravirt_patch_template pv_ops;
 #define paravirt_clobber(clobber)              \
        [paravirt_clobber] "i" (clobber)
 
+/*
+ * Generate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber)      \
+       "771:\n\t" insn_string "\n" "772:\n"            \
+       ".pushsection .parainstructions,\"a\"\n"        \
+       _ASM_ALIGN "\n"                                 \
+       _ASM_PTR " 771b\n"                              \
+       "  .byte " type "\n"                            \
+       "  .byte 772b-771b\n"                           \
+       "  .short " clobber "\n"                        \
+       ".popsection\n"
+
 /* Generate patchable code, with the default asm parameters. */
-#define paravirt_call                                                  \
-       "PARAVIRT_CALL type=\"%c[paravirt_typenum]\""                   \
-       " clobber=\"%c[paravirt_clobber]\""                             \
-       " pv_opptr=\"%c[paravirt_opptr]\";"
+#define paravirt_alt(insn_string)                                      \
+       _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 
 /* Simple instruction patching code. */
 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -372,6 +384,16 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
 
 int paravirt_disable_iospace(void);
 
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL                                  \
+       ANNOTATE_RETPOLINE_SAFE                         \
+       "call *%c[paravirt_opptr];"
+
 /*
  * These macros are intended to wrap calls through one of the paravirt
  * ops structs, so that they can be later identified and patched at
@@ -509,7 +531,7 @@ int paravirt_disable_iospace(void);
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
-                                    paravirt_call                      \
+                                    paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr, ASM_CALL_CONSTRAINT   \
                                     : paravirt_type(op),               \
@@ -519,7 +541,7 @@ int paravirt_disable_iospace(void);
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
-                                    paravirt_call                      \
+                                    paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr, ASM_CALL_CONSTRAINT   \
                                     : paravirt_type(op),               \
@@ -546,7 +568,7 @@ int paravirt_disable_iospace(void);
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
-                            paravirt_call                              \
+                            paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : call_clbr, ASM_CALL_CONSTRAINT           \
                             : paravirt_type(op),                       \
@@ -664,26 +686,6 @@ struct paravirt_patch_site {
 extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
 
-#else  /* __ASSEMBLY__ */
-
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
-771:   ANNOTATE_RETPOLINE_SAFE
-       call *\pv_opptr
-772:   .pushsection .parainstructions,"a"
-       _ASM_ALIGN
-       _ASM_PTR 771b
-       .byte \type
-       .byte 772b-771b
-       .short \clobber
-       .popsection
-.endm
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_PARAVIRT_TYPES_H */
index 84bd9bdc1987faa634cd1daad7dbfe94d586a82b..88bca456da994c5b2a76f7046bf12f4f2615361d 100644 (file)
@@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d;
  */
 #define MAXMEM                 (1UL << MAX_PHYSMEM_BITS)
 
+#define GUARD_HOLE_PGD_ENTRY   -256UL
+#define GUARD_HOLE_SIZE                (16UL << PGDIR_SHIFT)
+#define GUARD_HOLE_BASE_ADDR   (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
+#define GUARD_HOLE_END_ADDR    (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
+
 #define LDT_PGD_ENTRY          -240UL
 #define LDT_BASE_ADDR          (LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR           (LDT_BASE_ADDR + PGDIR_SIZE)
index a8b5e1e133190ff1b8acf6d38795485d11fd061d..dbaed55c1c2442263624aeea5c1faa5afeefba26 100644 (file)
@@ -4,41 +4,6 @@
  * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
  * PaX/grsecurity.
  */
-
-#ifdef __ASSEMBLY__
-
-#include <asm/asm.h>
-#include <asm/bug.h>
-
-.macro REFCOUNT_EXCEPTION counter:req
-       .pushsection .text..refcount
-111:   lea \counter, %_ASM_CX
-112:   ud2
-       ASM_UNREACHABLE
-       .popsection
-113:   _ASM_EXTABLE_REFCOUNT(112b, 113b)
-.endm
-
-/* Trigger refcount exception if refcount result is negative. */
-.macro REFCOUNT_CHECK_LT_ZERO counter:req
-       js 111f
-       REFCOUNT_EXCEPTION counter="\counter"
-.endm
-
-/* Trigger refcount exception if refcount result is zero or negative. */
-.macro REFCOUNT_CHECK_LE_ZERO counter:req
-       jz 111f
-       REFCOUNT_CHECK_LT_ZERO counter="\counter"
-.endm
-
-/* Trigger refcount exception unconditionally. */
-.macro REFCOUNT_ERROR counter:req
-       jmp 111f
-       REFCOUNT_EXCEPTION counter="\counter"
-.endm
-
-#else /* __ASSEMBLY__ */
-
 #include <linux/refcount.h>
 #include <asm/bug.h>
 
  * central refcount exception. The fixup address for the exception points
  * back to the regular execution flow in .text.
  */
+#define _REFCOUNT_EXCEPTION                            \
+       ".pushsection .text..refcount\n"                \
+       "111:\tlea %[var], %%" _ASM_CX "\n"             \
+       "112:\t" ASM_UD2 "\n"                           \
+       ASM_UNREACHABLE                                 \
+       ".popsection\n"                                 \
+       "113:\n"                                        \
+       _ASM_EXTABLE_REFCOUNT(112b, 113b)
+
+/* Trigger refcount exception if refcount result is negative. */
+#define REFCOUNT_CHECK_LT_ZERO                         \
+       "js 111f\n\t"                                   \
+       _REFCOUNT_EXCEPTION
+
+/* Trigger refcount exception if refcount result is zero or negative. */
+#define REFCOUNT_CHECK_LE_ZERO                         \
+       "jz 111f\n\t"                                   \
+       REFCOUNT_CHECK_LT_ZERO
+
+/* Trigger refcount exception unconditionally. */
+#define REFCOUNT_ERROR                                 \
+       "jmp 111f\n\t"                                  \
+       _REFCOUNT_EXCEPTION
 
 static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 {
        asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
-               "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
-               : [counter] "+m" (r->refs.counter)
+               REFCOUNT_CHECK_LT_ZERO
+               : [var] "+m" (r->refs.counter)
                : "ir" (i)
                : "cc", "cx");
 }
@@ -63,32 +51,31 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 static __always_inline void refcount_inc(refcount_t *r)
 {
        asm volatile(LOCK_PREFIX "incl %0\n\t"
-               "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
-               : [counter] "+m" (r->refs.counter)
+               REFCOUNT_CHECK_LT_ZERO
+               : [var] "+m" (r->refs.counter)
                : : "cc", "cx");
 }
 
 static __always_inline void refcount_dec(refcount_t *r)
 {
        asm volatile(LOCK_PREFIX "decl %0\n\t"
-               "REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\""
-               : [counter] "+m" (r->refs.counter)
+               REFCOUNT_CHECK_LE_ZERO
+               : [var] "+m" (r->refs.counter)
                : : "cc", "cx");
 }
 
 static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-
        return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
-                                        "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+                                        REFCOUNT_CHECK_LT_ZERO,
                                         r->refs.counter, e, "er", i, "cx");
 }
 
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
        return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
-                                       "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+                                       REFCOUNT_CHECK_LT_ZERO,
                                        r->refs.counter, e, "cx");
 }
 
@@ -106,8 +93,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 
                /* Did we try to increment from/to an undesirable state? */
                if (unlikely(c < 0 || c == INT_MAX || result < c)) {
-                       asm volatile("REFCOUNT_ERROR counter=\"%[counter]\""
-                                    : : [counter] "m" (r->refs.counter)
+                       asm volatile(REFCOUNT_ERROR
+                                    : : [var] "m" (r->refs.counter)
                                     : "cc", "cx");
                        break;
                }
@@ -122,6 +109,4 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
        return refcount_add_not_zero(1, r);
 }
 
-#endif /* __ASSEMBLY__ */
-
 #endif
index 27937458c231b62772bf956b1bc584fc030281ba..efa4a519f5e552eb1bb67f4ea980728310b3e0a5 100644 (file)
@@ -23,6 +23,7 @@
 
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                return -EINVAL;
        buf[nbytes - 1] = '\0';
 
+       cpus_read_lock();
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (!rdtgrp) {
                rdtgroup_kn_unlock(of->kn);
+               cpus_read_unlock();
                return -ENOENT;
        }
        rdt_last_cmd_clear();
@@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
 out:
        rdtgroup_kn_unlock(of->kn);
+       cpus_read_unlock();
        return ret ?: nbytes;
 }
 
index 2e173d47b450d4febbb9e2028f153bc91382b915..4d36dcc1cf87c5b75bcf085df5e77d745a852070 100644 (file)
@@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
        struct mtrr_gentry gentry;
        void __user *arg = (void __user *) __arg;
 
+       memset(&gentry, 0, sizeof(gentry));
+
        switch (cmd) {
        case MTRRIOC_ADD_ENTRY:
        case MTRRIOC_SET_ENTRY:
diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S
deleted file mode 100644 (file)
index 161c950..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file includes headers whose assembly part includes macros which are
- * commonly used. The macros are precompiled into assmebly file which is later
- * assembled together with each compiled file.
- */
-
-#include <linux/compiler.h>
-#include <asm/refcount.h>
-#include <asm/alternative-asm.h>
-#include <asm/bug.h>
-#include <asm/paravirt.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/jump_label.h>
index bbfbf017065c387c76f3f7c6394f34816fb7a4f5..ddd4fa718c43d271cdcdf326ad88339bc412829d 100644 (file)
@@ -339,24 +339,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
        return base;
 }
 
-void x86_fsbase_write_cpu(unsigned long fsbase)
-{
-       /*
-        * Set the selector to 0 as a notion, that the segment base is
-        * overwritten, which will be checked for skipping the segment load
-        * during context switch.
-        */
-       loadseg(FS, 0);
-       wrmsrl(MSR_FS_BASE, fsbase);
-}
-
-void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
-{
-       /* Set the selector to 0 for the same reason as %fs above. */
-       loadseg(GS, 0);
-       wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
-}
-
 unsigned long x86_fsbase_read_task(struct task_struct *task)
 {
        unsigned long fsbase;
@@ -385,38 +367,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
        return gsbase;
 }
 
-int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
 {
-       /*
-        * Not strictly needed for %fs, but do it for symmetry
-        * with %gs
-        */
-       if (unlikely(fsbase >= TASK_SIZE_MAX))
-               return -EPERM;
+       WARN_ON_ONCE(task == current);
 
-       preempt_disable();
        task->thread.fsbase = fsbase;
-       if (task == current)
-               x86_fsbase_write_cpu(fsbase);
-       task->thread.fsindex = 0;
-       preempt_enable();
-
-       return 0;
 }
 
-int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
 {
-       if (unlikely(gsbase >= TASK_SIZE_MAX))
-               return -EPERM;
+       WARN_ON_ONCE(task == current);
 
-       preempt_disable();
        task->thread.gsbase = gsbase;
-       if (task == current)
-               x86_gsbase_write_cpu_inactive(gsbase);
-       task->thread.gsindex = 0;
-       preempt_enable();
-
-       return 0;
 }
 
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
@@ -754,11 +716,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 
        switch (option) {
        case ARCH_SET_GS: {
-               ret = x86_gsbase_write_task(task, arg2);
+               if (unlikely(arg2 >= TASK_SIZE_MAX))
+                       return -EPERM;
+
+               preempt_disable();
+               /*
+                * ARCH_SET_GS has always overwritten the index
+                * and the base. Zero is the most sensible value
+                * to put in the index, and is the only value that
+                * makes any sense if FSGSBASE is unavailable.
+                */
+               if (task == current) {
+                       loadseg(GS, 0);
+                       x86_gsbase_write_cpu_inactive(arg2);
+
+                       /*
+                        * On non-FSGSBASE systems, save_base_legacy() expects
+                        * that we also fill in thread.gsbase.
+                        */
+                       task->thread.gsbase = arg2;
+
+               } else {
+                       task->thread.gsindex = 0;
+                       x86_gsbase_write_task(task, arg2);
+               }
+               preempt_enable();
                break;
        }
        case ARCH_SET_FS: {
-               ret = x86_fsbase_write_task(task, arg2);
+               /*
+                * Not strictly needed for %fs, but do it for symmetry
+                * with %gs
+                */
+               if (unlikely(arg2 >= TASK_SIZE_MAX))
+                       return -EPERM;
+
+               preempt_disable();
+               /*
+                * Set the selector to 0 for the same reason
+                * as %gs above.
+                */
+               if (task == current) {
+                       loadseg(FS, 0);
+                       x86_fsbase_write_cpu(arg2);
+
+                       /*
+                        * On non-FSGSBASE systems, save_base_legacy() expects
+                        * that we also fill in thread.fsbase.
+                        */
+                       task->thread.fsbase = arg2;
+               } else {
+                       task->thread.fsindex = 0;
+                       x86_fsbase_write_task(task, arg2);
+               }
+               preempt_enable();
                break;
        }
        case ARCH_GET_FS: {
index ffae9b9740fdf3a53619f03c4261be6711d7e7a7..4b8ee05dd6addf89478a4f26e8b8d6be29329721 100644 (file)
@@ -397,11 +397,12 @@ static int putreg(struct task_struct *child,
                if (value >= TASK_SIZE_MAX)
                        return -EIO;
                /*
-                * When changing the FS base, use the same
-                * mechanism as for do_arch_prctl_64().
+                * When changing the FS base, use do_arch_prctl_64()
+                * to set the index to zero and to set the base
+                * as requested.
                 */
                if (child->thread.fsbase != value)
-                       return x86_fsbase_write_task(child, value);
+                       return do_arch_prctl_64(child, ARCH_SET_FS, value);
                return 0;
        case offsetof(struct user_regs_struct,gs_base):
                /*
@@ -410,7 +411,7 @@ static int putreg(struct task_struct *child,
                if (value >= TASK_SIZE_MAX)
                        return -EIO;
                if (child->thread.gsbase != value)
-                       return x86_gsbase_write_task(child, value);
+                       return do_arch_prctl_64(child, ARCH_SET_GS, value);
                return 0;
 #endif
        }
index cc6467b35a85f6cec9300011cfa0c464574ed5d3..101f53ccf5718d99e5c1e95927b153031114c497 100644 (file)
@@ -2937,6 +2937,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
        WARN_ON(mmu_is_nested(vcpu));
+
+       vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_mmu(vcpu);
        vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
        vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
@@ -2949,6 +2951,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 }
 
@@ -3458,7 +3461,6 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
                svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
 
        if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
-               kvm_mmu_unload(&svm->vcpu);
                svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
                nested_svm_init_mmu_context(&svm->vcpu);
        }
index fc37bbd23eb8b4c996cddeb1c1f6c39cc2a23f5d..abcb8d00b01486f431fc9fa591f6e6260c8e1153 100644 (file)
@@ -55,10 +55,10 @@ struct addr_marker {
 enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
-       LOW_KERNEL_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
        LDT_NR,
 #endif
+       LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
 #ifdef CONFIG_KASAN
@@ -66,9 +66,6 @@ enum address_markers_idx {
        KASAN_SHADOW_END_NR,
 #endif
        CPU_ENTRY_AREA_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
-       LDT_NR,
-#endif
 #ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
 #endif
@@ -512,11 +509,11 @@ static inline bool is_hypervisor_range(int idx)
 {
 #ifdef CONFIG_X86_64
        /*
-        * ffff800000000000 - ffff87ffffffffff is reserved for
-        * the hypervisor.
+        * A hole in the beginning of kernel address space reserved
+        * for a hypervisor.
         */
-       return  (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
-               (idx <  pgd_index(__PAGE_OFFSET));
+       return  (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
+               (idx <  pgd_index(GUARD_HOLE_END_ADDR));
 #else
        return false;
 #endif
index db7a1008223886d398c531d8d34720cd0265d17d..a1bcde35db4cac8468a4a344094eee9daacb0f41 100644 (file)
@@ -285,20 +285,16 @@ static void cpa_flush_all(unsigned long cache)
        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
+static bool __inv_flush_all(int cache)
 {
        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
 
-       WARN_ON(PAGE_ALIGN(start) != start);
-
        if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                cpa_flush_all(cache);
                return true;
        }
 
-       flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-
-       return !cache;
+       return false;
 }
 
 static void cpa_flush_range(unsigned long start, int numpages, int cache)
@@ -306,7 +302,14 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        unsigned int i, level;
        unsigned long addr;
 
-       if (__cpa_flush_range(start, numpages, cache))
+       WARN_ON(PAGE_ALIGN(start) != start);
+
+       if (__inv_flush_all(cache))
+               return;
+
+       flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
+
+       if (!cache)
                return;
 
        /*
@@ -332,7 +335,12 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
 {
        unsigned int i, level;
 
-       if (__cpa_flush_range(baddr, numpages, cache))
+       if (__inv_flush_all(cache))
+               return;
+
+       flush_tlb_all();
+
+       if (!cache)
                return;
 
        /*
index 08013524fba18ebe2afc44bf798c5ad77839fa0b..4fe956a63b25b54fe479aadfeb51dfdf66ef74d2 100644 (file)
@@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address)
         * for a "decoy" virtual address (bit 63 clear) passed to
         * set_memory_X(). __pa() on a "decoy" address results in a
         * physical address with bit 63 set.
+        *
+        * Decoy addresses are not present for 32-bit builds, see
+        * set_mce_nospec().
         */
-       return address & __PHYSICAL_MASK;
+       if (IS_ENABLED(CONFIG_X86_64))
+               return address & __PHYSICAL_MASK;
+       return address;
 }
 
 /*
@@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
        start = sanitize_phys(start);
        end = sanitize_phys(end);
-       BUG_ON(start >= end); /* end is exclusive */
+       if (start >= end) {
+               WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+                               start, end - 1, cattr_name(req_type));
+               return -EINVAL;
+       }
 
        if (!pat_enabled()) {
                /* This is identical to page table setting without PAT */
index a5d7ed12533707f8714e066cd4be4c30f880988d..0f4fe206dcc2015ce212b19c8865b06f854fb3b1 100644 (file)
@@ -648,19 +648,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                          unsigned long limit)
 {
        int i, nr, flush = 0;
-       unsigned hole_low, hole_high;
+       unsigned hole_low = 0, hole_high = 0;
 
        /* The limit is the last byte to be touched */
        limit--;
        BUG_ON(limit >= FIXADDR_TOP);
 
+#ifdef CONFIG_X86_64
        /*
         * 64-bit has a great big hole in the middle of the address
-        * space, which contains the Xen mappings.  On 32-bit these
-        * will end up making a zero-sized hole and so is a no-op.
+        * space, which contains the Xen mappings.
         */
-       hole_low = pgd_index(USER_LIMIT);
-       hole_high = pgd_index(PAGE_OFFSET);
+       hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
+       hole_high = pgd_index(GUARD_HOLE_END_ADDR);
+#endif
 
        nr = pgd_index(limit) + 1;
        for (i = 0; i < nr; i++) {
index 05813fbf3daf25f4aeb6ea0233f13c13dd95ad73..647dfbbc4e1cf44ac989a1562e2c2f711fca5d36 100644 (file)
@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
        struct spi_device *spi = to_spi_device(dev);
        u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
 
-       return spi_write(spi, (const u8 *)&word, sizeof(word));
+       return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
 }
 
 /* A read from the MAX7301 means two transfers; here, one message each */
@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
        struct spi_device *spi = to_spi_device(dev);
 
        word = 0x8000 | (reg << 8);
-       ret = spi_write(spi, (const u8 *)&word, sizeof(word));
-       if (ret)
-               return ret;
-       /*
-        * This relies on the fact, that a transfer with NULL tx_buf shifts out
-        * zero bytes (=NOOP for MAX7301)
-        */
-       ret = spi_read(spi, (u8 *)&word, sizeof(word));
+       ret = spi_write_then_read(spi, &word, sizeof(word), &word,
+                                 sizeof(word));
        if (ret)
                return ret;
        return word & 0xff;
index 6e02148c208b2cc600d75263d1c87064db2ebc23..adc768f908f1ae1937f3d245088c590b449e574f 100644 (file)
@@ -773,9 +773,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
                                     "marvell,armada-370-gpio"))
                return 0;
 
-       if (IS_ERR(mvchip->clk))
-               return PTR_ERR(mvchip->clk);
-
        /*
         * There are only two sets of PWM configuration registers for
         * all the GPIO lines on those SoCs which this driver reserves
@@ -786,6 +783,9 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
        if (!res)
                return 0;
 
+       if (IS_ERR(mvchip->clk))
+               return PTR_ERR(mvchip->clk);
+
        /*
         * Use set A for lines of GPIO chip with id 0, B for GPIO chip
         * with id 1. Don't allow further GPIO chips to be used for PWM.
index 9887c3db6e16ace91790fbb938452584512e45c5..5b3e83cd71378b0ee81f83a51e4a69380d752fef 100644 (file)
@@ -32,7 +32,6 @@
 #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
 
 #define OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER    BIT(2)
-#define OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN       BIT(1)
 
 struct gpio_regs {
        u32 irqenable1;
@@ -379,18 +378,9 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
                        readl_relaxed(bank->base + bank->regs->fallingdetect);
 
        if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
-               /* Defer wkup_en register update until we idle? */
-               if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
-                       if (trigger)
-                               bank->context.wake_en |= gpio_bit;
-                       else
-                               bank->context.wake_en &= ~gpio_bit;
-               } else {
-                       omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit,
-                                     trigger != 0);
-                       bank->context.wake_en =
-                               readl_relaxed(bank->base + bank->regs->wkup_en);
-               }
+               omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
+               bank->context.wake_en =
+                       readl_relaxed(bank->base + bank->regs->wkup_en);
        }
 
        /* This part needs to be executed always for OMAP{34xx, 44xx} */
@@ -942,44 +932,6 @@ omap2_gpio_disable_level_quirk(struct gpio_bank *bank)
                       bank->base + bank->regs->risingdetect);
 }
 
-/*
- * On omap4 and later SoC variants a level interrupt with wkup_en
- * enabled blocks the GPIO functional clock from idling until the GPIO
- * instance has been reset. To avoid that, we must set wkup_en only for
- * idle for level interrupts, and clear level registers for the duration
- * of idle. The level interrupts will be still there on wakeup by their
- * nature.
- */
-static void __maybe_unused
-omap4_gpio_enable_level_quirk(struct gpio_bank *bank)
-{
-       /* Update wake register for idle, edge bits might be already set */
-       writel_relaxed(bank->context.wake_en,
-                      bank->base + bank->regs->wkup_en);
-
-       /* Clear level registers for idle */
-       writel_relaxed(0, bank->base + bank->regs->leveldetect0);
-       writel_relaxed(0, bank->base + bank->regs->leveldetect1);
-}
-
-static void __maybe_unused
-omap4_gpio_disable_level_quirk(struct gpio_bank *bank)
-{
-       /* Restore level registers after idle */
-       writel_relaxed(bank->context.leveldetect0,
-                      bank->base + bank->regs->leveldetect0);
-       writel_relaxed(bank->context.leveldetect1,
-                      bank->base + bank->regs->leveldetect1);
-
-       /* Clear saved wkup_en for level, it will be set for next idle again */
-       bank->context.wake_en &= ~(bank->context.leveldetect0 |
-                                  bank->context.leveldetect1);
-
-       /* Update wake with only edge configuration */
-       writel_relaxed(bank->context.wake_en,
-                      bank->base + bank->regs->wkup_en);
-}
-
 /*---------------------------------------------------------------------*/
 
 static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -1412,12 +1364,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
                                omap_set_gpio_dataout_mask_multiple;
        }
 
-       if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
-               bank->funcs.idle_enable_level_quirk =
-                       omap4_gpio_enable_level_quirk;
-               bank->funcs.idle_disable_level_quirk =
-                       omap4_gpio_disable_level_quirk;
-       } else if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
+       if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
                bank->funcs.idle_enable_level_quirk =
                        omap2_gpio_enable_level_quirk;
                bank->funcs.idle_disable_level_quirk =
@@ -1806,8 +1753,7 @@ static const struct omap_gpio_platform_data omap4_pdata = {
        .regs = &omap4_gpio_regs,
        .bank_width = 32,
        .dbck_flag = true,
-       .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER |
-                 OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN,
+       .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
 };
 
 static const struct of_device_id omap_gpio_match[] = {
index 55b72fbe163169c29766fbb6e24f58ccfdfd62c2..7f93954c58ea47f3be84c0955d6887f45fac5c25 100644 (file)
 
 #include "gpiolib.h"
 
+/**
+ * struct acpi_gpio_event - ACPI GPIO event handler data
+ *
+ * @node:        list-entry of the events list of the struct acpi_gpio_chip
+ * @handle:      handle of ACPI method to execute when the IRQ triggers
+ * @handler:     irq_handler to pass to request_irq when requesting the IRQ
+ * @pin:         GPIO pin number on the gpio_chip
+ * @irq:         Linux IRQ number for the event, for request_ / free_irq
+ * @irqflags:     flags to pass to request_irq when requesting the IRQ
+ * @irq_is_wake:  If the ACPI flags indicate the IRQ is a wakeup source
+ * @is_requested: True if request_irq has been done
+ * @desc:        gpio_desc for the GPIO pin for this event
+ */
 struct acpi_gpio_event {
        struct list_head node;
        acpi_handle handle;
+       irq_handler_t handler;
        unsigned int pin;
        unsigned int irq;
+       unsigned long irqflags;
+       bool irq_is_wake;
+       bool irq_requested;
        struct gpio_desc *desc;
 };
 
@@ -49,10 +66,10 @@ struct acpi_gpio_chip {
 
 /*
  * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
  * late_initcall_sync handler, so that other builtin drivers can register their
  * OpRegions before the event handlers can run.  This list contains gpiochips
- * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
  */
 static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
 static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@@ -133,8 +150,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
 }
 EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
 
-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
-                                                  void *context)
+static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+                                     struct acpi_gpio_event *event)
+{
+       int ret, value;
+
+       ret = request_threaded_irq(event->irq, NULL, event->handler,
+                                  event->irqflags, "ACPI:Event", event);
+       if (ret) {
+               dev_err(acpi_gpio->chip->parent,
+                       "Failed to setup interrupt handler for %d\n",
+                       event->irq);
+               return;
+       }
+
+       if (event->irq_is_wake)
+               enable_irq_wake(event->irq);
+
+       event->irq_requested = true;
+
+       /* Make sure we trigger the initial state of edge-triggered IRQs */
+       value = gpiod_get_raw_value_cansleep(event->desc);
+       if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+           ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+               event->handler(event->irq, event);
+}
+
+static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+{
+       struct acpi_gpio_event *event;
+
+       list_for_each_entry(event, &acpi_gpio->events, node)
+               acpi_gpiochip_request_irq(acpi_gpio, event);
+}
+
+static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+                                            void *context)
 {
        struct acpi_gpio_chip *acpi_gpio = context;
        struct gpio_chip *chip = acpi_gpio->chip;
@@ -143,8 +194,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        struct acpi_gpio_event *event;
        irq_handler_t handler = NULL;
        struct gpio_desc *desc;
-       unsigned long irqflags;
-       int ret, pin, irq, value;
+       int ret, pin, irq;
 
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return AE_OK;
@@ -175,8 +225,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
-       value = gpiod_get_value_cansleep(desc);
-
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
                dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -189,64 +237,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
                goto fail_unlock_irq;
        }
 
-       irqflags = IRQF_ONESHOT;
+       event = kzalloc(sizeof(*event), GFP_KERNEL);
+       if (!event)
+               goto fail_unlock_irq;
+
+       event->irqflags = IRQF_ONESHOT;
        if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
                if (agpio->polarity == ACPI_ACTIVE_HIGH)
-                       irqflags |= IRQF_TRIGGER_HIGH;
+                       event->irqflags |= IRQF_TRIGGER_HIGH;
                else
-                       irqflags |= IRQF_TRIGGER_LOW;
+                       event->irqflags |= IRQF_TRIGGER_LOW;
        } else {
                switch (agpio->polarity) {
                case ACPI_ACTIVE_HIGH:
-                       irqflags |= IRQF_TRIGGER_RISING;
+                       event->irqflags |= IRQF_TRIGGER_RISING;
                        break;
                case ACPI_ACTIVE_LOW:
-                       irqflags |= IRQF_TRIGGER_FALLING;
+                       event->irqflags |= IRQF_TRIGGER_FALLING;
                        break;
                default:
-                       irqflags |= IRQF_TRIGGER_RISING |
-                                   IRQF_TRIGGER_FALLING;
+                       event->irqflags |= IRQF_TRIGGER_RISING |
+                                          IRQF_TRIGGER_FALLING;
                        break;
                }
        }
 
-       event = kzalloc(sizeof(*event), GFP_KERNEL);
-       if (!event)
-               goto fail_unlock_irq;
-
        event->handle = evt_handle;
+       event->handler = handler;
        event->irq = irq;
+       event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
        event->pin = pin;
        event->desc = desc;
 
-       ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
-                                  "ACPI:Event", event);
-       if (ret) {
-               dev_err(chip->parent,
-                       "Failed to setup interrupt handler for %d\n",
-                       event->irq);
-               goto fail_free_event;
-       }
-
-       if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
-               enable_irq_wake(irq);
-
        list_add_tail(&event->node, &acpi_gpio->events);
 
-       /*
-        * Make sure we trigger the initial state of the IRQ when using RISING
-        * or FALLING.  Note we run the handlers on late_init, the AML code
-        * may refer to OperationRegions from other (builtin) drivers which
-        * may be probed after us.
-        */
-       if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-           ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-               handler(event->irq, event);
-
        return AE_OK;
 
-fail_free_event:
-       kfree(event);
 fail_unlock_irq:
        gpiochip_unlock_as_irq(chip, pin);
 fail_free_desc:
@@ -283,6 +309,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       acpi_walk_resources(handle, "_AEI",
+                           acpi_gpiochip_alloc_event, acpi_gpio);
+
        mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
        defer = !acpi_gpio_deferred_req_irqs_done;
        if (defer)
@@ -293,8 +322,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (defer)
                return;
 
-       acpi_walk_resources(handle, "_AEI",
-                           acpi_gpiochip_request_interrupt, acpi_gpio);
+       acpi_gpiochip_request_irqs(acpi_gpio);
 }
 EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
 
@@ -331,10 +359,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
-               if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
-                       disable_irq_wake(event->irq);
+               if (event->irq_requested) {
+                       if (event->irq_is_wake)
+                               disable_irq_wake(event->irq);
+
+                       free_irq(event->irq, event);
+               }
 
-               free_irq(event->irq, event);
                desc = event->desc;
                if (WARN_ON(IS_ERR(desc)))
                        continue;
@@ -1200,23 +1231,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
        return con_id == NULL;
 }
 
-/* Run deferred acpi_gpiochip_request_interrupts() */
-static int acpi_gpio_handle_deferred_request_interrupts(void)
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int acpi_gpio_handle_deferred_request_irqs(void)
 {
        struct acpi_gpio_chip *acpi_gpio, *tmp;
 
        mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
        list_for_each_entry_safe(acpi_gpio, tmp,
                                 &acpi_gpio_deferred_req_irqs_list,
-                                deferred_req_irqs_list_entry) {
-               acpi_handle handle;
-
-               handle = ACPI_HANDLE(acpi_gpio->chip->parent);
-               acpi_walk_resources(handle, "_AEI",
-                                   acpi_gpiochip_request_interrupt, acpi_gpio);
-
-               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
-       }
+                                deferred_req_irqs_list_entry)
+               acpi_gpiochip_request_irqs(acpi_gpio);
 
        acpi_gpio_deferred_req_irqs_done = true;
        mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
@@ -1224,4 +1248,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
        return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
index 94bd872d56c48b24512ef660849237781e6316c0..7e6746b2d704c588db58f950461d293d4a1fd018 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/nospec.h>
 
 /**
  * DOC: getunique and setversion story
@@ -800,13 +801,17 @@ long drm_ioctl(struct file *filp,
 
        if (is_driver_ioctl) {
                /* driver ioctl */
-               if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+               unsigned int index = nr - DRM_COMMAND_BASE;
+
+               if (index >= dev->driver->num_ioctls)
                        goto err_i1;
-               ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+               index = array_index_nospec(index, dev->driver->num_ioctls);
+               ioctl = &dev->driver->ioctls[index];
        } else {
                /* core ioctl */
                if (nr >= DRM_CORE_IOCTL_COUNT)
                        goto err_i1;
+               nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
                ioctl = &drm_ioctls[nr];
        }
 
@@ -888,6 +893,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
 
        if (nr >= DRM_CORE_IOCTL_COUNT)
                return false;
+       nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
 
        *flags = drm_ioctls[nr].flags;
        return true;
index e99c3bb5835137c8ad02cda35b6475eacc363355..4e67d5ed480e627d2d19042974bd6eac78dcc9a4 100644 (file)
@@ -342,7 +342,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev)
        pci_free_irq_vectors(pdev);
 }
 
-static int gpu_i2c_resume(struct device *dev)
+static __maybe_unused int gpu_i2c_resume(struct device *dev)
 {
        struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
 
index a7dc286f406c992ebd55d764808691101dfff690..840e53732753f556a6f89e7afde3b2c1bf65edcc 100644 (file)
@@ -126,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
 {
        struct omap4_keypad *keypad_data = dev_id;
 
-       if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
-               /* Disable interrupts */
-               kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-                                OMAP4_VAL_IRQDISABLE);
+       if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
                return IRQ_WAKE_THREAD;
-       }
 
        return IRQ_NONE;
 }
@@ -173,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
        kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
                         kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
 
-       /* enable interrupts */
-       kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-               OMAP4_DEF_IRQENABLE_EVENTEN |
-                               OMAP4_DEF_IRQENABLE_LONGKEY);
-
        return IRQ_HANDLED;
 }
 
@@ -214,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input)
 
        disable_irq(keypad_data->irq);
 
-       /* Disable interrupts */
+       /* Disable interrupts and wake-up events */
        kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
                         OMAP4_VAL_IRQDISABLE);
+       kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
 
        /* clear pending interrupts */
        kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
@@ -365,7 +357,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
        }
 
        error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
-                                    omap4_keypad_irq_thread_fn, 0,
+                                    omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
                                     "omap4-keypad", keypad_data);
        if (error) {
                dev_err(&pdev->dev, "failed to register interrupt\n");
index 2d95e8d93cc761aefb102217473d940faf1e4d02..9fe075c137dc41bb513060d5b69871ed922e493f 100644 (file)
@@ -1767,6 +1767,18 @@ static int elantech_smbus = IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) ?
 module_param_named(elantech_smbus, elantech_smbus, int, 0644);
 MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device.");
 
+static const char * const i2c_blacklist_pnp_ids[] = {
+       /*
+        * These are known to not be working properly as bits are missing
+        * in elan_i2c.
+        */
+       "LEN2131", /* ThinkPad P52 w/ NFC */
+       "LEN2132", /* ThinkPad P52 */
+       "LEN2133", /* ThinkPad P72 w/ NFC */
+       "LEN2134", /* ThinkPad P72 */
+       NULL
+};
+
 static int elantech_create_smbus(struct psmouse *psmouse,
                                 struct elantech_device_info *info,
                                 bool leave_breadcrumbs)
@@ -1802,10 +1814,12 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 
        if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) {
                /*
-                * New ICs are enabled by default.
+                * New ICs are enabled by default, unless mentioned in
+                * i2c_blacklist_pnp_ids.
                 * Old ICs are up to the user to decide.
                 */
-               if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+               if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+                   psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
                        return -ENXIO;
        }
 
index 2bd5bb11c8baec85bb9422dbfb0612e6bfed77f2..b6da0c1267e36e96cdc3b808e1fae122bec7add4 100644 (file)
@@ -171,6 +171,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0046", /* X250 */
        "LEN004a", /* W541 */
        "LEN005b", /* P50 */
+       "LEN005e", /* T560 */
        "LEN0071", /* T480 */
        "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
        "LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -178,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
        "LEN200f", /* T450s */
+       "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
        NULL
 };
index a4e3454133a47eacdcc61034c11b6c98d831e9c8..09170b707339e57be9833cbc9b25fca6d738c424 100644 (file)
@@ -1101,10 +1101,10 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
        ubi_wl_close(ubi);
        ubi_free_internal_volumes(ubi);
        vfree(ubi->vtbl);
-       put_mtd_device(ubi->mtd);
        vfree(ubi->peb_buf);
        vfree(ubi->fm_buf);
        ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
+       put_mtd_device(ubi->mtd);
        put_device(&ubi->dev);
        return 0;
 }
index e9e9ecbcedcc384aee39a7b3e08f9d23cffd39aa..0b8f0c46268dae932896b7438e15fe8205b9016f 100644 (file)
@@ -227,9 +227,9 @@ out_unlock:
 out_free:
        kfree(desc);
 out_put_ubi:
-       ubi_put_device(ubi);
        ubi_err(ubi, "cannot open device %d, volume %d, error %d",
                ubi_num, vol_id, err);
+       ubi_put_device(ubi);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(ubi_open_volume);
index 9e7ef7ec2d70f0d84002dbfed141fa325d20ad8d..a8999f930b224ec6d6af2d74479270a43a000f56 100644 (file)
@@ -97,7 +97,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                goto finished;
 
-       smb2_set_next_command(server, &rqst[num_rqst++]);
+       smb2_set_next_command(server, &rqst[num_rqst++], 0);
 
        /* Operation */
        switch (command) {
@@ -111,7 +111,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                                SMB2_O_INFO_FILE, 0,
                                sizeof(struct smb2_file_all_info) +
                                          PATH_MAX * 2, 0, NULL);
-               smb2_set_next_command(server, &rqst[num_rqst]);
+               smb2_set_next_command(server, &rqst[num_rqst], 0);
                smb2_set_related(&rqst[num_rqst++]);
                break;
        case SMB2_OP_DELETE:
@@ -127,14 +127,14 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                rqst[num_rqst].rq_iov = si_iov;
                rqst[num_rqst].rq_nvec = 1;
 
-               size[0] = 8;
+               size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
                data[0] = &delete_pending[0];
 
                rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
                                        COMPOUND_FID, current->tgid,
                                        FILE_DISPOSITION_INFORMATION,
                                        SMB2_O_INFO_FILE, 0, data, size);
-               smb2_set_next_command(server, &rqst[num_rqst]);
+               smb2_set_next_command(server, &rqst[num_rqst], 1);
                smb2_set_related(&rqst[num_rqst++]);
                break;
        case SMB2_OP_SET_EOF:
@@ -149,7 +149,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                                        COMPOUND_FID, current->tgid,
                                        FILE_END_OF_FILE_INFORMATION,
                                        SMB2_O_INFO_FILE, 0, data, size);
-               smb2_set_next_command(server, &rqst[num_rqst]);
+               smb2_set_next_command(server, &rqst[num_rqst], 0);
                smb2_set_related(&rqst[num_rqst++]);
                break;
        case SMB2_OP_SET_INFO:
@@ -165,7 +165,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                                        COMPOUND_FID, current->tgid,
                                        FILE_BASIC_INFORMATION,
                                        SMB2_O_INFO_FILE, 0, data, size);
-               smb2_set_next_command(server, &rqst[num_rqst]);
+               smb2_set_next_command(server, &rqst[num_rqst], 0);
                smb2_set_related(&rqst[num_rqst++]);
                break;
        case SMB2_OP_RENAME:
@@ -189,7 +189,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                                        COMPOUND_FID, current->tgid,
                                        FILE_RENAME_INFORMATION,
                                        SMB2_O_INFO_FILE, 0, data, size);
-               smb2_set_next_command(server, &rqst[num_rqst]);
+               smb2_set_next_command(server, &rqst[num_rqst], 0);
                smb2_set_related(&rqst[num_rqst++]);
                break;
        case SMB2_OP_HARDLINK:
@@ -213,7 +213,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                                        COMPOUND_FID, current->tgid,
                                        FILE_LINK_INFORMATION,
                                        SMB2_O_INFO_FILE, 0, data, size);
-               smb2_set_next_command(server, &rqst[num_rqst]);
+               smb2_set_next_command(server, &rqst[num_rqst], 0);
                smb2_set_related(&rqst[num_rqst++]);
                break;
        default:
index 225fec1cfa673360d794058e5acb1d5737acdd32..e25c7aade98a41e2b6c6268239d31e33648cac05 100644 (file)
@@ -1194,7 +1194,7 @@ smb2_ioctl_query_info(const unsigned int xid,
        rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
        if (rc)
                goto iqinf_exit;
-       smb2_set_next_command(ses->server, &rqst[0]);
+       smb2_set_next_command(ses->server, &rqst[0], 0);
 
        /* Query */
        memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1208,7 +1208,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                                  qi.output_buffer_length, buffer);
        if (rc)
                goto iqinf_exit;
-       smb2_set_next_command(ses->server, &rqst[1]);
+       smb2_set_next_command(ses->server, &rqst[1], 0);
        smb2_set_related(&rqst[1]);
 
        /* Close */
@@ -1761,16 +1761,23 @@ smb2_set_related(struct smb_rqst *rqst)
 char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
 
 void
-smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+                     bool has_space_for_padding)
 {
        struct smb2_sync_hdr *shdr;
        unsigned long len = smb_rqst_len(server, rqst);
 
        /* SMB headers in a compound are 8 byte aligned. */
        if (len & 7) {
-               rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
-               rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
-               rqst->rq_nvec++;
+               if (has_space_for_padding) {
+                       len = rqst->rq_iov[rqst->rq_nvec - 1].iov_len;
+                       rqst->rq_iov[rqst->rq_nvec - 1].iov_len =
+                               (len + 7) & ~7;
+               } else {
+                       rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
+                       rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
+                       rqst->rq_nvec++;
+               }
                len = smb_rqst_len(server, rqst);
        }
 
@@ -1820,7 +1827,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &srch_path);
        if (rc)
                goto qfs_exit;
-       smb2_set_next_command(server, &rqst[0]);
+       smb2_set_next_command(server, &rqst[0], 0);
 
        memset(&qi_iov, 0, sizeof(qi_iov));
        rqst[1].rq_iov = qi_iov;
@@ -1833,7 +1840,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
                                  NULL);
        if (rc)
                goto qfs_exit;
-       smb2_set_next_command(server, &rqst[1]);
+       smb2_set_next_command(server, &rqst[1], 0);
        smb2_set_related(&rqst[1]);
 
        memset(&close_iov, 0, sizeof(close_iov));
index 9f4e9ed9ce53c23899e345d634b58c6e771253e2..2fe78acd7d0c14444a3372e3dbed6800b8bbc33f 100644 (file)
@@ -117,7 +117,8 @@ extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
 extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
                                  struct smb_rqst *rqst);
 extern void smb2_set_next_command(struct TCP_Server_Info *server,
-                                 struct smb_rqst *rqst);
+                                 struct smb_rqst *rqst,
+                                 bool has_space_for_padding);
 extern void smb2_set_related(struct smb_rqst *rqst);
 
 /*
index 529856fbccd0ee5f6559519a3d9db12cf932a775..bc1e082d921d152a6df154378320ff4e5f3bce94 100644 (file)
@@ -12,9 +12,10 @@ config UBIFS_FS
        help
          UBIFS is a file system for flash devices which works on top of UBI.
 
+if UBIFS_FS
+
 config UBIFS_FS_ADVANCED_COMPR
        bool "Advanced compression options"
-       depends on UBIFS_FS
        help
          This option allows to explicitly choose which compressions, if any,
          are enabled in UBIFS. Removing compressors means inability to read
@@ -24,7 +25,6 @@ config UBIFS_FS_ADVANCED_COMPR
 
 config UBIFS_FS_LZO
        bool "LZO compression support" if UBIFS_FS_ADVANCED_COMPR
-       depends on UBIFS_FS
        default y
        help
           LZO compressor is generally faster than zlib but compresses worse.
@@ -32,14 +32,12 @@ config UBIFS_FS_LZO
 
 config UBIFS_FS_ZLIB
        bool "ZLIB compression support" if UBIFS_FS_ADVANCED_COMPR
-       depends on UBIFS_FS
        default y
        help
          Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
 
 config UBIFS_ATIME_SUPPORT
-       bool "Access time support" if UBIFS_FS
-       depends on UBIFS_FS
+       bool "Access time support"
        default n
        help
          Originally UBIFS did not support atime, because it looked like a bad idea due
@@ -54,7 +52,6 @@ config UBIFS_ATIME_SUPPORT
 
 config UBIFS_FS_XATTR
        bool "UBIFS XATTR support"
-       depends on UBIFS_FS
        default y
        help
          Saying Y here includes support for extended attributes (xattrs).
@@ -65,7 +62,7 @@ config UBIFS_FS_XATTR
 
 config UBIFS_FS_ENCRYPTION
        bool "UBIFS Encryption"
-       depends on UBIFS_FS && UBIFS_FS_XATTR && BLOCK
+       depends on UBIFS_FS_XATTR && BLOCK
        select FS_ENCRYPTION
        default n
        help
@@ -76,7 +73,7 @@ config UBIFS_FS_ENCRYPTION
 
 config UBIFS_FS_SECURITY
        bool "UBIFS Security Labels"
-       depends on UBIFS_FS && UBIFS_FS_XATTR
+       depends on UBIFS_FS_XATTR
        default y
        help
          Security labels provide an access control facility to support Linux
@@ -89,6 +86,7 @@ config UBIFS_FS_SECURITY
 
 config UBIFS_FS_AUTHENTICATION
        bool "UBIFS authentication support"
+       depends on KEYS
        select CRYPTO_HMAC
        help
          Enable authentication support for UBIFS. This feature offers protection
@@ -96,3 +94,5 @@ config UBIFS_FS_AUTHENTICATION
          If you say yes here you should also select a hashing algorithm such as
          sha256, these are not selected automatically since there are many
          different options.
+
+endif # UBIFS_FS
index d1d5e96350ddbd0ff549941655b0e4d8e089beef..b0c5f06128b5385cff7f8ca514f2c2a3f3296cb0 100644 (file)
@@ -1675,6 +1675,12 @@ int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash)
        if (!ubifs_authenticated(c))
                return 0;
 
+       if (!c->nroot) {
+               err = ubifs_read_nnode(c, NULL, 0);
+               if (err)
+                       return err;
+       }
+
        desc = ubifs_hash_get_desc(c);
        if (IS_ERR(desc))
                return PTR_ERR(desc);
@@ -1685,12 +1691,6 @@ int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash)
                goto out;
        }
 
-       if (!c->nroot) {
-               err = ubifs_read_nnode(c, NULL, 0);
-               if (err)
-                       return err;
-       }
-
        cnode = (struct ubifs_cnode *)c->nroot;
 
        while (cnode) {
index 75f961c4c0449505aaac3f085aa80b00acb0b8ab..0a0e65c07c6d644e34b6816895ffe72023763108 100644 (file)
@@ -212,6 +212,38 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
        return ubifs_tnc_remove_range(c, &min_key, &max_key);
 }
 
+/**
+ * inode_still_linked - check whether inode in question will be re-linked.
+ * @c: UBIFS file-system description object
+ * @rino: replay entry to test
+ *
+ * O_TMPFILE files can be re-linked, this means link count goes from 0 to 1.
+ * This case needs special care, otherwise all references to the inode will
+ * be removed upon the first replay entry of an inode with link count 0
+ * is found.
+ */
+static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
+{
+       struct replay_entry *r;
+
+       ubifs_assert(c, rino->deletion);
+       ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY);
+
+       /*
+        * Find the most recent entry for the inode behind @rino and check
+        * whether it is a deletion.
+        */
+       list_for_each_entry_reverse(r, &c->replay_list, list) {
+               ubifs_assert(c, r->sqnum >= rino->sqnum);
+               if (key_inum(c, &r->key) == key_inum(c, &rino->key))
+                       return r->deletion == 0;
+
+       }
+
+       ubifs_assert(c, 0);
+       return false;
+}
+
 /**
  * apply_replay_entry - apply a replay entry to the TNC.
  * @c: UBIFS file-system description object
@@ -239,6 +271,11 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
                        {
                                ino_t inum = key_inum(c, &r->key);
 
+                               if (inode_still_linked(c, r)) {
+                                       err = 0;
+                                       break;
+                               }
+
                                err = ubifs_tnc_remove_ino(c, inum);
                                break;
                        }
@@ -533,6 +570,28 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
        return data == 0xFFFFFFFF;
 }
 
+/* authenticate_sleb_hash and authenticate_sleb_hmac are split out for stack usage */
+static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
+{
+       SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
+
+       hash_desc->tfm = c->hash_tfm;
+       hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       ubifs_shash_copy_state(c, log_hash, hash_desc);
+       return crypto_shash_final(hash_desc, hash);
+}
+
+static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
+{
+       SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
+
+       hmac_desc->tfm = c->hmac_tfm;
+       hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
+}
+
 /**
  * authenticate_sleb - authenticate one scan LEB
  * @c: UBIFS file-system description object
@@ -574,21 +633,12 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 
                if (snod->type == UBIFS_AUTH_NODE) {
                        struct ubifs_auth_node *auth = snod->node;
-                       SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
-                       SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
-
-                       hash_desc->tfm = c->hash_tfm;
-                       hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-                       ubifs_shash_copy_state(c, log_hash, hash_desc);
-                       err = crypto_shash_final(hash_desc, hash);
+                       err = authenticate_sleb_hash(c, log_hash, hash);
                        if (err)
                                goto out;
 
-                       hmac_desc->tfm = c->hmac_tfm;
-                       hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-                       err = crypto_shash_digest(hmac_desc, hash, c->hash_len,
-                                                 hmac);
+                       err = authenticate_sleb_hmac(c, hash, hmac);
                        if (err)
                                goto out;
 
index 75a69dd26d6eafa609a01a2f9969c22c37512346..3da90c951c2354eafc32dcaeae1134b17a0573c1 100644 (file)
 /* Default time granularity in nanoseconds */
 #define DEFAULT_TIME_GRAN 1000000000
 
+static int get_default_compressor(struct ubifs_info *c)
+{
+       if (ubifs_compr_present(c, UBIFS_COMPR_LZO))
+               return UBIFS_COMPR_LZO;
+
+       if (ubifs_compr_present(c, UBIFS_COMPR_ZLIB))
+               return UBIFS_COMPR_ZLIB;
+
+       return UBIFS_COMPR_NONE;
+}
+
 /**
  * create_default_filesystem - format empty UBI volume.
  * @c: UBIFS file-system description object
@@ -207,7 +218,7 @@ static int create_default_filesystem(struct ubifs_info *c)
        if (c->mount_opts.override_compr)
                sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
        else
-               sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
+               sup->default_compr = cpu_to_le16(get_default_compressor(c));
 
        generate_random_uuid(sup->uuid);
 
index cdafa5edea491ef01aedbeea78ee65c51bdb3417..20561a60db9c4f077c68ee1a182eac38ff96d4dd 100644 (file)
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 
-struct bug_entry {
+#ifdef CONFIG_BUG
+
 #ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        unsigned long   bug_addr;
 #else
@@ -33,10 +35,8 @@ struct bug_entry {
        unsigned short  line;
 #endif
        unsigned short  flags;
-#endif /* CONFIG_GENERIC_BUG */
 };
-
-#ifdef CONFIG_BUG
+#endif /* CONFIG_GENERIC_BUG */
 
 /*
  * Don't use BUG() or BUG_ON() unless there's really no way out; one
index 06396c1cf127f75bb357326883f1dcb69161ccf1..fc5004a4b07d7b5b546e07f7d952fd1094892ead 100644 (file)
@@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * unique, to convince GCC not to merge duplicate inline asm statements.
  */
 #define annotate_reachable() ({                                                \
-       asm volatile("ANNOTATE_REACHABLE counter=%c0"                   \
-                    : : "i" (__COUNTER__));                            \
+       asm volatile("%c0:\n\t"                                         \
+                    ".pushsection .discard.reachable\n\t"              \
+                    ".long %c0b - .\n\t"                               \
+                    ".popsection\n\t" : : "i" (__COUNTER__));          \
 })
 #define annotate_unreachable() ({                                      \
-       asm volatile("ANNOTATE_UNREACHABLE counter=%c0"                 \
-                    : : "i" (__COUNTER__));                            \
+       asm volatile("%c0:\n\t"                                         \
+                    ".pushsection .discard.unreachable\n\t"            \
+                    ".long %c0b - .\n\t"                               \
+                    ".popsection\n\t" : : "i" (__COUNTER__));          \
 })
+#define ASM_UNREACHABLE                                                        \
+       "999:\n\t"                                                      \
+       ".pushsection .discard.unreachable\n\t"                         \
+       ".long 999b - .\n\t"                                            \
+       ".popsection\n\t"
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
@@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off)
        return (void *)((unsigned long)off + *off);
 }
 
-#else /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#ifndef LINKER_SCRIPT
-
-#ifdef CONFIG_STACK_VALIDATION
-.macro ANNOTATE_UNREACHABLE counter:req
-\counter:
-       .pushsection .discard.unreachable
-       .long \counter\()b -.
-       .popsection
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-\counter:
-       .pushsection .discard.reachable
-       .long \counter\()b -.
-       .popsection
-.endm
-
-.macro ASM_UNREACHABLE
-999:
-       .pushsection .discard.unreachable
-       .long 999b - .
-       .popsection
-.endm
-#else /* CONFIG_STACK_VALIDATION */
-.macro ANNOTATE_UNREACHABLE counter:req
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-.endm
-
-.macro ASM_UNREACHABLE
-.endm
-#endif /* CONFIG_STACK_VALIDATION */
-
-#endif /* LINKER_SCRIPT */
-#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 /* Compile time object size, -1 for unknown */
index f423f9b6577eb23849ebbbf87ee9549d74e62da2..5cc8083a4c890976c93d5ff5bc45357349f13f25 100644 (file)
@@ -1148,11 +1148,65 @@ out_error:
        return ret;
 }
 
+static int handle_exit_race(u32 __user *uaddr, u32 uval,
+                           struct task_struct *tsk)
+{
+       u32 uval2;
+
+       /*
+        * If PF_EXITPIDONE is not yet set, then try again.
+        */
+       if (tsk && !(tsk->flags & PF_EXITPIDONE))
+               return -EAGAIN;
+
+       /*
+        * Reread the user space value to handle the following situation:
+        *
+        * CPU0                         CPU1
+        *
+        * sys_exit()                   sys_futex()
+        *  do_exit()                    futex_lock_pi()
+        *                                futex_lock_pi_atomic()
+        *   exit_signals(tsk)              No waiters:
+        *    tsk->flags |= PF_EXITING;     *uaddr == 0x00000PID
+        *  mm_release(tsk)                 Set waiter bit
+        *   exit_robust_list(tsk) {        *uaddr = 0x80000PID;
+        *      Set owner died              attach_to_pi_owner() {
+        *    *uaddr = 0xC0000000;           tsk = get_task(PID);
+        *   }                               if (!tsk->flags & PF_EXITING) {
+        *  ...                                attach();
+        *  tsk->flags |= PF_EXITPIDONE;     } else {
+        *                                     if (!(tsk->flags & PF_EXITPIDONE))
+        *                                       return -EAGAIN;
+        *                                     return -ESRCH; <--- FAIL
+        *                                   }
+        *
+        * Returning ESRCH unconditionally is wrong here because the
+        * user space value has been changed by the exiting task.
+        *
+        * The same logic applies to the case where the exiting task is
+        * already gone.
+        */
+       if (get_futex_value_locked(&uval2, uaddr))
+               return -EFAULT;
+
+       /* If the user space value has changed, try again. */
+       if (uval2 != uval)
+               return -EAGAIN;
+
+       /*
+        * The exiting task did not have a robust list, the robust list was
+        * corrupted or the user space value in *uaddr is simply bogus.
+        * Give up and tell user space.
+        */
+       return -ESRCH;
+}
+
 /*
  * Lookup the task for the TID provided from user space and attach to
  * it after doing proper sanity checks.
  */
-static int attach_to_pi_owner(u32 uval, union futex_key *key,
+static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
                              struct futex_pi_state **ps)
 {
        pid_t pid = uval & FUTEX_TID_MASK;
@@ -1162,12 +1216,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0 [1]
+        *
+        * The !pid check is paranoid. None of the call sites should end up
+        * with pid == 0, but better safe than sorry. Let the caller retry
         */
        if (!pid)
-               return -ESRCH;
+               return -EAGAIN;
        p = find_get_task_by_vpid(pid);
        if (!p)
-               return -ESRCH;
+               return handle_exit_race(uaddr, uval, NULL);
 
        if (unlikely(p->flags & PF_KTHREAD)) {
                put_task_struct(p);
@@ -1187,7 +1244,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
                 * set, we know that the task has finished the
                 * cleanup:
                 */
-               int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+               int ret = handle_exit_race(uaddr, uval, p);
 
                raw_spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
@@ -1244,7 +1301,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
         * We are the first waiter - try to look up the owner based on
         * @uval and attach to it.
         */
-       return attach_to_pi_owner(uval, key, ps);
+       return attach_to_pi_owner(uaddr, uval, key, ps);
 }
 
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1352,7 +1409,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
         * attach to the owner. If that fails, no harm done, we only
         * set the FUTEX_WAITERS bit in the user space variable.
         */
-       return attach_to_pi_owner(uval, key, ps);
+       return attach_to_pi_owner(uaddr, newval, key, ps);
 }
 
 /**
index bd62b5eeb5a057b000d6b8019072853b5daa37d6..31f49ae80f43dd76e5eea85416dc362b45e60780 100644 (file)
@@ -289,9 +289,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
 {
        struct hrtimer *timer = &timr->it.real.timer;
 
-       if (!timr->it_interval)
-               return;
-
        timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
                                            timr->it_interval);
        hrtimer_restart(timer);
@@ -317,7 +314,7 @@ void posixtimer_rearm(struct kernel_siginfo *info)
        if (!timr)
                return;
 
-       if (timr->it_requeue_pending == info->si_sys_private) {
+       if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
                timr->kclock->timer_rearm(timr);
 
                timr->it_active = 1;
index bb015551c2d9ae11c67f276cc64d7f84ef5521fc..3d09844405c923c2d1a7fa04d6e1c15b6567180d 100644 (file)
@@ -115,9 +115,7 @@ __cc-option = $(call try-run,\
 
 # Do not attempt to build with gcc plugins during cc-option tests.
 # (And this uses delayed resolution so the flags will be up to date.)
-# In addition, do not include the asm macros which are built later.
-CC_OPTION_FILTERED = $(GCC_PLUGINS_CFLAGS) $(ASM_MACRO_FLAGS)
-CC_OPTION_CFLAGS = $(filter-out $(CC_OPTION_FILTERED),$(KBUILD_CFLAGS))
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
index a5b4af47987a7ab84af40d84d5b8b84f48973068..42c5d50f2bccf1d3ed8de3366526ae8279c31eb9 100644 (file)
@@ -4,8 +4,6 @@ OBJECT_FILES_NON_STANDARD := y
 hostprogs-y    := modpost mk_elfconfig
 always         := $(hostprogs-y) empty.o
 
-CFLAGS_REMOVE_empty.o := $(ASM_MACRO_FLAGS)
-
 modpost-objs   := modpost.o file2alias.o sumversion.o
 
 devicetable-offsets-file := devicetable-offsets.h