Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Aug 2016 21:10:23 +0000 (14:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Aug 2016 21:10:23 +0000 (14:10 -0700)
Pull virtio/vhost fixes and cleanups from Michael Tsirkin:
 "Misc fixes and cleanups all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio/s390: deprecate old transport
  virtio/s390: keep early_put_chars
  virtio_blk: Fix a silent kernel panic
  virtio-vsock: fix include guard typo
  vhost/vsock: fix vhost virtio_vsock_pkt use-after-free
  9p/trans_virtio: use kvfree() for iov_iter_get_pages_alloc()
  virtio: fix error handling for debug builds
  virtio: fix memory leak in virtqueue_add()

86 files changed:
Makefile
arch/Kconfig
arch/arm/Kconfig
arch/arm/boot/dts/keystone.dtsi
arch/arm/include/asm/uaccess.h
arch/arm/kernel/sys_oabi-compat.c
arch/arm64/Kconfig
arch/arm64/include/asm/uaccess.h
arch/ia64/Kconfig
arch/ia64/include/asm/uaccess.h
arch/metag/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/uaccess.h
arch/s390/Kconfig
arch/s390/lib/uaccess.c
arch/sparc/Kconfig
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/x86/Kconfig
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/lib/hweight.S
drivers/block/rbd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/misc/Makefile
drivers/misc/lkdtm_usercopy.c
drivers/platform/x86/dell-wmi.c
drivers/rapidio/rio_cm.c
drivers/scsi/ipr.c
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/ceph/caps.c
fs/ceph/mds_client.c
fs/pipe.c
include/drm/ttm/ttm_bo_driver.h
include/linux/mmzone.h
include/linux/printk.h
include/linux/slab.h
include/linux/thread_info.h
include/linux/uaccess.h
include/trace/events/timer.h
init/Kconfig
kernel/printk/internal.h
kernel/printk/nmi.c
kernel/printk/printk.c
lib/strncpy_from_user.c
lib/strnlen_user.c
mm/Makefile
mm/memcontrol.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slub.c
mm/usercopy.c [new file with mode: 0644]
net/ceph/mon_client.c
net/ceph/osd_client.c
net/ceph/string_table.c
scripts/Kbuild.include
scripts/Makefile.gcc-plugins
scripts/gcc-plugin.sh
scripts/gcc-plugins/Makefile
scripts/get_maintainer.pl
security/Kconfig

index 70de1448c5717b6914bc6626fe1663e4d52c1e95..8c504f3241544620a149d7b946a62e7535b3eef6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -635,13 +635,6 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 
-PHONY += gcc-plugins
-gcc-plugins: scripts_basic
-ifdef CONFIG_GCC_PLUGINS
-       $(Q)$(MAKE) $(build)=scripts/gcc-plugins
-endif
-       @:
-
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
index bd8056b5b246058a8c06190811f875a00f30df8d..e9c9334507ddd57f2fd787f2faa3dac71edc18a7 100644 (file)
@@ -461,6 +461,15 @@ config CC_STACKPROTECTOR_STRONG
 
 endchoice
 
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+       bool
+       help
+         An architecture should select this if it can walk the kernel stack
+         frames to determine if an object is part of either the arguments
+         or local variables (i.e. that it excludes saved return addresses,
+         and similar) by implementing an inline arch_within_stack_frames(),
+         which is used by CONFIG_HARDENED_USERCOPY.
+
 config HAVE_CONTEXT_TRACKING
        bool
        help
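
Editor's note: the include/linux/thread_info.h change in this series (listed in the diffstat above but not excerpted here) provides a generic fallback for architectures that do not select this symbol. A minimal sketch of its expected shape, returning 0 for "unable to determine":

    #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
    /* Fallback: this arch cannot walk its frames, so report "unknown". */
    static inline int arch_within_stack_frames(const void * const stack,
                                               const void * const stackend,
                                               const void *obj,
                                               unsigned long len)
    {
            return 0;
    }
    #endif
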
index 2d601d769a1cdddae7bbba2bb22571731e3d4f5e..a9c4e48bb7ec997bec394066914d26f337a2fec4 100644 (file)
@@ -35,6 +35,7 @@ config ARM
        select HARDIRQS_SW_RESEND
        select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
index 00cb314d5e4db81fcc035b2a68101a557cf751a4..e23f46d15c806566abc2ec88828bc8d053ffd6e5 100644 (file)
                cpu_on          = <0x84000003>;
        };
 
-       psci {
-               compatible      = "arm,psci";
-               method          = "smc";
-               cpu_suspend     = <0x84000001>;
-               cpu_off         = <0x84000002>;
-               cpu_on          = <0x84000003>;
-       };
-
        soc {
                #address-cells = <1>;
                #size-cells = <1>;
index 62a6f65029e61aebf9b64e1df59fb9383f4064a9..a93c0f99acf7767c680158cf96acef87d1f0da51 100644 (file)
@@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(to, n, false);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
@@ -495,11 +498,15 @@ static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(from, n, true);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
 #else
+       check_object_size(from, n, true);
        return arm_copy_to_user(to, from, n);
 #endif
 }
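
Editor's note: the check_object_size() calls added to these uaccess paths are the entry point of the hardened-usercopy checks; the helper itself lands in include/linux/thread_info.h and mm/usercopy.c (both in the diffstat, not excerpted here). A sketch of the wrapper's rough shape, not the merged code verbatim:

    /* to_user is true when copying a kernel object out to userspace. */
    #ifdef CONFIG_HARDENED_USERCOPY
    extern void __check_object_size(const void *ptr, unsigned long n,
                                    bool to_user);
    static inline void check_object_size(const void *ptr, unsigned long n,
                                         bool to_user)
    {
            __check_object_size(ptr, n, to_user);
    }
    #else
    static inline void check_object_size(const void *ptr, unsigned long n,
                                         bool to_user)
    { }
    #endif
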
index 087acb569b63a4bd90982e0c9b15fc2313636c53..5f221acd21aebb3ca1c2ee560fb68241bc1e02c9 100644 (file)
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
        mm_segment_t fs;
        long ret, err, i;
 
-       if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+       if (maxevents <= 0 ||
+                       maxevents > (INT_MAX/sizeof(*kbuf)) ||
+                       maxevents > (INT_MAX/sizeof(*events)))
                return -EINVAL;
+       if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+               return -EFAULT;
        kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
 
        if (nsops < 1 || nsops > SEMOPM)
                return -EINVAL;
+       if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+               return -EFAULT;
        sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
        if (!sops)
                return -ENOMEM;
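
Editor's note: both hunks above pair an explicit access_ok() check with a tightened bound on the element count; the INT_MAX/sizeof() division guards the multiplication inside kmalloc() against integer overflow. An illustrative fragment (hypothetical 16-byte element, not merged code) of the failure mode being ruled out:

    /* How a multiplied allocation size can wrap on 32-bit, which the
     * INT_MAX/sizeof() bound above prevents. */
    unsigned int count = 0x10000001;     /* passes a plain "> 0" check */
    unsigned int bytes = count * 16u;    /* 0x100000010 wraps to 0x10  */
    /* kmalloc(bytes, GFP_KERNEL) returns 16 bytes; a loop that then
     * fills count elements writes far past the allocation. */
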
index 69c8787bec7d3f3e343b592b01997667f3f8c53d..bc3f00f586f1111fa41c5e36d6530569f0a5b3a8 100644 (file)
@@ -54,6 +54,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
index 5e834d10b2913d91bf3620c0e5930cdb5e14035e..c47257c91b77e3d6516000c0c8bec5705b97b6dc 100644 (file)
@@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        kasan_check_write(to, n);
-       return  __arch_copy_from_user(to, from, n);
+       check_object_size(to, n, false);
+       return __arch_copy_from_user(to, from, n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        kasan_check_read(from, n);
-       return  __arch_copy_to_user(to, from, n);
+       check_object_size(from, n, true);
+       return __arch_copy_to_user(to, from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        kasan_check_write(to, n);
 
-       if (access_ok(VERIFY_READ, from, n))
+       if (access_ok(VERIFY_READ, from, n)) {
+               check_object_size(to, n, false);
                n = __arch_copy_from_user(to, from, n);
-       else /* security hole - plug it */
+       } else /* security hole - plug it */
                memset(to, 0, n);
        return n;
 }
@@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        kasan_check_read(from, n);
 
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               check_object_size(from, n, true);
                n = __arch_copy_to_user(to, from, n);
+       }
        return n;
 }
 
index 6a15083cc366df9c13962206952d266625034c7f..18ca6a9ce566cc3b8463d22ecfe3e27c25fc24f7 100644 (file)
@@ -52,6 +52,7 @@ config IA64
        select MODULES_USE_ELF_RELA
        select ARCH_USE_CMPXCHG_LOCKREF
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
index 2189d5ddc1eeef552dd602875ae7d34c8baf17f7..465c70982f40d8960925bcefa29bb9402167aba9 100644 (file)
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+       if (!__builtin_constant_p(count))
+               check_object_size(from, count, true);
+
        return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+       if (!__builtin_constant_p(count))
+               check_object_size(to, count, false);
+
        return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        const void *__cu_from = (from);                                                 \
        long __cu_len = (n);                                                            \
                                                                                        \
-       if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
-               __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);   \
+       if (__access_ok(__cu_to, __cu_len, get_fs())) {                                 \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_from, __cu_len, true);                   \
+               __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);  \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        long __cu_len = (n);                                                            \
                                                                                        \
        __chk_user_ptr(__cu_from);                                                      \
-       if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
+       if (__access_ok(__cu_from, __cu_len, get_fs())) {                               \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_to, __cu_len, false);                    \
                __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
index 11fa51c89617deb1a303c6bfdbf82b2a32a1e4db..c0ec116b3993a3a61b852c9daf14a34ab0962e15 100644 (file)
@@ -390,7 +390,6 @@ void __init mem_init(void)
 
        free_all_bootmem();
        mem_init_print_info(NULL);
-       show_mem(0);
 }
 
 void free_initmem(void)
index ec4047e170a0e6dc5267509f36974f8cbaa1fa84..927d2ab2ce08a68c2574c41e6d1b732ecf597d9b 100644 (file)
@@ -166,6 +166,7 @@ config PPC
        select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
        select GENERIC_CPU_AUTOPROBE
        select HAVE_VIRT_CPU_ACCOUNTING
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
index b7c20f0b8fbeebe03a1c57a4c62f86c0c42e962d..c1dc6c14deb84a261ab19e509f12078a271937d1 100644 (file)
@@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_READ, from, n))
+       if (access_ok(VERIFY_READ, from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_tofrom_user((__force void __user *)to, from, n);
+       }
        if ((unsigned long)from < TASK_SIZE) {
                over = (unsigned long)from + n - TASK_SIZE;
+               if (!__builtin_constant_p(n - over))
+                       check_object_size(to, n - over, false);
                return __copy_tofrom_user((__force void __user *)to, from,
                                n - over) + over;
        }
@@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_tofrom_user(to, (__force void __user *)from, n);
+       }
        if ((unsigned long)to < TASK_SIZE) {
                over = (unsigned long)to + n - TASK_SIZE;
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n - over, true);
                return __copy_tofrom_user(to, (__force void __user *)from,
                                n - over) + over;
        }
@@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                if (ret == 0)
                        return 0;
        }
+
+       if (!__builtin_constant_p(n))
+               check_object_size(to, n, false);
+
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
index 5a2907ee8c934d8269356bf1c450f9d2350ce687..e751fe25d6ab670428f5c97b8464d9102baddbe6 100644 (file)
@@ -123,6 +123,7 @@ config S390
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_EARLY_PFN_TO_NID
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_JUMP_LABEL
        select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
index d96596128e9f2591c4cde57f3bb6b1bf256b30f9..f481fcde067ba145f6cc2ace122d9d0a543ebe00 100644 (file)
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       check_object_size(to, n, false);
        if (static_branch_likely(&have_mvcos))
                return copy_from_user_mvcos(to, from, n);
        return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        if (static_branch_likely(&have_mvcos))
                return copy_to_user_mvcos(to, from, n);
        return copy_to_user_mvcs(to, from, n);
index 546293d9e6c52906f2c1f72d1a9545749595f994..59b09600dd326b8d0e24853d720d18d73837cd69 100644 (file)
@@ -43,6 +43,7 @@ config SPARC
        select OLD_SIGSUSPEND
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config SPARC32
        def_bool !64BIT
index 57aca2792d29f89203735f892ceab4b73340bd69..341a5a133f4837b98f5c9928865ccf6d088af419 100644 (file)
@@ -248,22 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) to, n))
+       if (n && __access_ok((unsigned long) to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_user(to, (__force void __user *) from, n);
-       else
+       } else
                return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
        return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) from, n))
+       if (n && __access_ok((unsigned long) from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_user((__force void __user *) to, from, n);
-       else
+       } else
                return n;
 }
 
index e9a51d64974ddff102017ee8f86be01f7a41361a..8bda94fab8e8cc52baca57952391cbb7665afd1f 100644 (file)
@@ -210,8 +210,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-       unsigned long ret = ___copy_from_user(to, from, size);
+       unsigned long ret;
 
+       if (!__builtin_constant_p(size))
+               check_object_size(to, size, false);
+
+       ret = ___copy_from_user(to, from, size);
        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);
 
@@ -227,8 +231,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-       unsigned long ret = ___copy_to_user(to, from, size);
+       unsigned long ret;
 
+       if (!__builtin_constant_p(size))
+               check_object_size(from, size, true);
+       ret = ___copy_to_user(to, from, size);
        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
        return ret;
index 5c6e7471b732335bf0b4272a4f274520d2bb4f4e..c580d8c33562ec5eba4dbfc273ae3ed7b6b67072 100644 (file)
@@ -80,6 +80,7 @@ config X86
        select HAVE_ALIGNED_STRUCT_PAGE         if SLUB
        select HAVE_AOUT                        if X86_32
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN                  if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +92,7 @@ config X86
        select HAVE_ARCH_SOFT_DIRTY             if X86_64
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_WITHIN_STACK_FRAMES
        select HAVE_EBPF_JIT                    if X86_64
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CMPXCHG_DOUBLE
index 84b59846154a92be76ebd630a79283c9d99b2a39..8b7c8d8e0852cf50d5a03e24f89ae9c693a1891c 100644 (file)
@@ -176,6 +176,50 @@ static inline unsigned long current_stack_pointer(void)
        return sp;
 }
 
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ *              1 if within a frame
+ *             -1 if placed across a frame boundary (or outside stack)
+ *              0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+       const void *frame = NULL;
+       const void *oldframe;
+
+       oldframe = __builtin_frame_address(1);
+       if (oldframe)
+               frame = __builtin_frame_address(2);
+       /*
+        * low ----------------------------------------------> high
+        * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+        *                     ^----------------^
+        *               allow copies only within here
+        */
+       while (stack <= frame && frame < stackend) {
+               /*
+                * If obj + len extends past the last frame, this
+                * check won't pass and the next frame will be 0,
+                * causing us to bail out and correctly report
+                * the copy as invalid.
+                */
+               if (obj + len <= frame)
+                       return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+               oldframe = frame;
+               frame = *(const void * const *)frame;
+       }
+       return -1;
+#else
+       return 0;
+#endif
+}
+
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
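
Editor's note: the new mm/usercopy.c (listed in the diffstat) is the consumer of this helper. A hedged approximation of how its stack check maps the three return values documented above; names follow the series, but the body is simplified:

    enum { BAD_STACK = -1, NOT_STACK = 0, GOOD_FRAME, GOOD_STACK };

    static noinline int check_stack_object(const void *obj, unsigned long len)
    {
            const void * const stack = task_stack_page(current);
            const void * const stackend = stack + THREAD_SIZE;
            int ret;

            /* Entirely outside the stack: nothing to decide here. */
            if (obj + len <= stack || stackend <= obj)
                    return NOT_STACK;

            /* Straddling a stack boundary: always reject. */
            if (obj + len > stackend || obj < stack)
                    return BAD_STACK;

            /* 1 (-1) from the arch walk is a definitive accept (reject);
             * 0 means "unknown", so fall back to the whole-stack answer. */
            ret = arch_within_stack_frames(stack, stackend, obj, len);
            return ret ? ret : GOOD_STACK;
    }
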
index c03bfb68c50352df52d6ae4e36fa54bdc22dbc50..a0ae610b9280183fc1ca42ddd3fcc45333b433c5 100644 (file)
@@ -761,9 +761,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
         * case, and do only runtime checking for non-constant sizes.
         */
 
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
-       else if(__builtin_constant_p(n))
+       } else if (__builtin_constant_p(n))
                copy_from_user_overflow();
        else
                __copy_from_user_overflow(sz, n);
@@ -781,9 +782,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        might_fault();
 
        /* See the comment in copy_from_user() above. */
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
-       else if(__builtin_constant_p(n))
+       } else if (__builtin_constant_p(n))
                copy_to_user_overflow();
        else
                __copy_to_user_overflow(sz, n);
@@ -812,21 +814,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 #define user_access_begin()    __uaccess_begin()
 #define user_access_end()      __uaccess_end()
 
-#define unsafe_put_user(x, ptr)                                                \
-({                                                                             \
+#define unsafe_put_user(x, ptr, err_label)                                     \
+do {                                                                           \
        int __pu_err;                                                           \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
-       __builtin_expect(__pu_err, 0);                                          \
-})
+       if (unlikely(__pu_err)) goto err_label;                                 \
+} while (0)
 
-#define unsafe_get_user(x, ptr)                                                \
-({                                                                             \
+#define unsafe_get_user(x, ptr, err_label)                                     \
+do {                                                                           \
        int __gu_err;                                                           \
        unsigned long __gu_val;                                                 \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
-       __builtin_expect(__gu_err, 0);                                          \
-})
+       if (unlikely(__gu_err)) goto err_label;                                 \
+} while (0)
 
 #endif /* _ASM_X86_UACCESS_H */
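
Editor's note: the reworked unsafe accessors above no longer return an error value; they jump to a caller-supplied label, so several accesses can share one fixup path inside a user_access_begin()/user_access_end() window. A usage sketch (hypothetical caller, not part of this merge; assumes the destination was already validated with access_ok()):

    static int put_two_words(u32 __user *dst, u32 a, u32 b)
    {
            user_access_begin();            /* opens the STAC window on x86 */
            unsafe_put_user(a, &dst[0], efault);
            unsafe_put_user(b, &dst[1], efault);
            user_access_end();
            return 0;
    efault:
            user_access_end();              /* close the window on errors too */
            return -EFAULT;
    }
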
 
index 4b32da24faaf1cb21fe3d4450a425c5193ee4b97..7d3bdd1ed6977b5e1f69dc8ba3e3d6cfa8f861a3 100644 (file)
@@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        return __copy_to_user_ll(to, from, n);
 }
 
@@ -95,6 +96,7 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        might_fault();
+       check_object_size(to, n, false);
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
index 2eac2aa3e37f30318f6e20ed1753ae29d472f251..673059a109fee067a6739704470f947fd7acae57 100644 (file)
@@ -54,6 +54,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(dst, size, false);
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
@@ -119,6 +120,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(src, size, true);
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
index 02de3d74d2c5bb319d48503371f451672d66dc80..8a602a1e404a262f32fbe708e926e986636dcfca 100644 (file)
@@ -35,6 +35,7 @@ ENDPROC(__sw_hweight32)
 
 ENTRY(__sw_hweight64)
 #ifdef CONFIG_X86_64
+       pushq   %rdi
        pushq   %rdx
 
        movq    %rdi, %rdx                      # w -> t
@@ -60,6 +61,7 @@ ENTRY(__sw_hweight64)
        shrq    $56, %rax                       # w = w_tmp >> 56
 
        popq    %rdx
+       popq    %rdi
        ret
 #else /* CONFIG_X86_32 */
        /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
index 1a04af6d24212cdd16e5c8091d01f16e2b409384..6c6519f6492a4198c78cae1eaad5e33e03efd2d9 100644 (file)
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev)
        bool need_put = !!rbd_dev->opts;
 
        ceph_oid_destroy(&rbd_dev->header_oid);
+       ceph_oloc_destroy(&rbd_dev->header_oloc);
 
        rbd_put_client(rbd_dev->rbd_client);
        rbd_spec_put(rbd_dev->spec);
@@ -5336,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        }
        spec->pool_id = (u64)rc;
 
-       /* The ceph file layout needs to fit pool id in 32 bits */
-
-       if (spec->pool_id > (u64)U32_MAX) {
-               rbd_warn(NULL, "pool id too large (%llu > %u)",
-                               (unsigned long long)spec->pool_id, U32_MAX);
-               rc = -EIO;
-               goto err_out_client;
-       }
-
        rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
        if (!rbd_dev) {
                rc = -ENOMEM;
index ff63b88b0ffaf721a58d63f81bb47ab227ac93b9..5cc7052e391d4f732dc48ac0ed01316a713054df 100644 (file)
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        char *table = NULL;
-       int size, i;
+       int size;
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;
 
-       for (i = 0; i < size; i++) {
-               sprintf(buf + i, "%02x", table[i]);
-       }
-       sprintf(buf + i, "\n");
+       memcpy(buf, table, size);
 
        return size;
 }
index b7742e62972a61aae553dca11477e89ab8be6e2e..9b61c8ba7aaf915892c9c2c86e5d4ec22934f2d9 100644 (file)
@@ -335,7 +335,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -368,7 +368,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
index e2f0e5d58d5cc05abd29ed65037e38a060fd3145..a5c94b482459234a09c87a85300d820e2ee608a5 100644 (file)
@@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
        default: BUG();
        }
 
index bff8668e9e6d466e059d70f719c0a818235040dc..b8184617ca250f143ee01779fa40f77edeee80e4 100644 (file)
@@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
 
 static const u32 golden_settings_polaris11_a11[] =
 {
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+       mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] =
        mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
        mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
        mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
-       mmSQ_CONFIG, 0x07f80000, 0x07180000,
+       mmSQ_CONFIG, 0x07f80000, 0x01180000,
        mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
        mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
-       mmCB_HW_CONTROL_2, 0, 0x0f000000,
+       mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] =
        mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
        mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
        mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+       mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
        mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] =
        mmGB_GPU_ID, 0x0000000f, 0x00000000,
        mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
        mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+       mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
        mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+       mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
        mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
        mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
index d24a82bd0c7a119d7b450ee801f3b5ffa1dc51e3..0b0f08641eed67b12c779f402c08c66c459ac1f7 100644 (file)
@@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                return 0;
        default: BUG();
        }
index 717359d3ba8c506b77b09d46663b7afc598e9df9..2aee2c6f3cd5b4719bdd97e38eeba43f6bb06718 100644 (file)
@@ -103,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_stoney_common[] =
+{
+       mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+       mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
 
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -142,6 +147,9 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
                amdgpu_program_register_sequence(adev,
                                                 stoney_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_stoney_common,
+                                                (const u32)ARRAY_SIZE(golden_settings_stoney_common));
                break;
        default:
                break;
index 80446e2d3ab6efd00f3d3e777e4178b26ee6ae5d..76bcb43e7c06ac2dcf56a4d183e61c869a3544c5 100644 (file)
@@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
                goto out;
        }
 
+       /*
+        * cirrus_modeset_init() is initializing/registering the emulated fbdev
+        * and DRM internals can access/test some of the fields in
+        * mode_config->funcs as part of the fbdev registration process.
+        * Make sure dev->mode_config.funcs is properly set to avoid
+        * dereferencing a NULL pointer.
+        * FIXME: mode_config.funcs assignment should probably be done in
+        * cirrus_modeset_init() (that's a common pattern seen in other DRM
+        * drivers).
+        */
+       dev->mode_config.funcs = &cirrus_mode_funcs;
        r = cirrus_modeset_init(cdev);
        if (r) {
                dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
                goto out;
        }
 
-       dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
-
        return 0;
 out:
        cirrus_driver_unload(dev);
index f1d9f0569d7f86514773e517bafba9326de82a45..b1dbb60af99fa79a796174dc560973b34ba6c4cc 100644 (file)
@@ -1121,16 +1121,14 @@ static int drm_connector_register_all(struct drm_device *dev)
        struct drm_connector *connector;
        int ret;
 
-       mutex_lock(&dev->mode_config.mutex);
-
-       drm_for_each_connector(connector, dev) {
+       /* FIXME: taking the mode config mutex ends up in a clash with
+        * fbcon/backlight registration */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                ret = drm_connector_register(connector);
                if (ret)
                        goto err;
        }
 
-       mutex_unlock(&dev->mode_config.mutex);
-
        return 0;
 
 err:
index 7df26d4b7ad8b5775c97df7e5ab3fdab315195e5..637a0aa4d3a0c1d71e83c8d0cde2b18584a20629 100644 (file)
@@ -74,6 +74,8 @@
 #define EDID_QUIRK_FORCE_8BPC                  (1 << 8)
 /* Force 12bpc */
 #define EDID_QUIRK_FORCE_12BPC                 (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC                  (1 << 10)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -100,6 +102,9 @@ static struct edid_quirk {
        /* Unknown Acer */
        { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
 
+       /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+       { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
        /* Belinea 10 15 55 */
        { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
        { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3862,6 +3867,20 @@ static void drm_add_display_info(struct edid *edid,
        /* HDMI deep color modes supported? Assign to info, if so */
        drm_assign_hdmi_deep_color_info(edid, info, connector);
 
+       /*
+        * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
+        *
+        * For such displays, the DFP spec 1.0, section 3.10 "EDID support"
+        * tells us to assume 8 bpc color depth if the EDID doesn't have
+        * extensions which tell otherwise.
+        */
+       if ((info->bpc == 0) && (edid->revision < 4) &&
+           (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+               info->bpc = 8;
+               DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
+                         connector->name, info->bpc);
+       }
+
        /* Only defined for 1.4 with digital displays */
        if (edid->revision < 4)
                return;
@@ -4082,6 +4101,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
        drm_add_display_info(edid, &connector->display_info, connector);
 
+       if (quirks & EDID_QUIRK_FORCE_6BPC)
+               connector->display_info.bpc = 6;
+
        if (quirks & EDID_QUIRK_FORCE_8BPC)
                connector->display_info.bpc = 8;
 
index c457eed76f1f7a1fca441d4714c2aebf95534d05..dcf93b3d4fb6cb75a836a242ef2e560e8f065b49 100644 (file)
@@ -5691,15 +5691,7 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
 
 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 {
-       unsigned int i;
-
-       for (i = 0; i < 15; i++) {
-               if (skl_cdclk_pcu_ready(dev_priv))
-                       return true;
-               udelay(10);
-       }
-
-       return false;
+       return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
 }
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
@@ -12114,21 +12106,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
        }
 
-       /* Clamp bpp to default limit on screens without EDID 1.4 */
-       if (connector->base.display_info.bpc == 0) {
-               int type = connector->base.connector_type;
-               int clamp_bpp = 24;
-
-               /* Fall back to 18 bpp when DP sink capability is unknown. */
-               if (type == DRM_MODE_CONNECTOR_DisplayPort ||
-                   type == DRM_MODE_CONNECTOR_eDP)
-                       clamp_bpp = 18;
-
-               if (bpp > clamp_bpp) {
-                       DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
-                                     bpp, clamp_bpp);
-                       pipe_config->pipe_bpp = clamp_bpp;
-               }
+       /* Clamp bpp to 8 on screens without EDID 1.4 */
+       if (connector->base.display_info.bpc == 0 && bpp > 24) {
+               DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+                             bpp);
+               pipe_config->pipe_bpp = 24;
        }
 }
 
index 86b00c6db1a6d694ad1beb97fcb37fc2cb899873..3e3632c18733502816dc169daab852910fcee814 100644 (file)
@@ -782,7 +782,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct fb_info *info;
 
-       if (!ifbdev)
+       if (!ifbdev || !ifbdev->fb)
                return;
 
        info = ifbdev->helper.fbdev;
@@ -827,31 +827,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       if (dev_priv->fbdev)
-               drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+       if (ifbdev && ifbdev->fb)
+               drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
 void intel_fbdev_restore_mode(struct drm_device *dev)
 {
-       int ret;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_fbdev *ifbdev = dev_priv->fbdev;
-       struct drm_fb_helper *fb_helper;
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
        if (!ifbdev)
                return;
 
        intel_fbdev_sync(ifbdev);
+       if (!ifbdev->fb)
+               return;
 
-       fb_helper = &ifbdev->helper;
-
-       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
-       if (ret) {
+       if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
                DRM_DEBUG("failed to restore crtc mode\n");
        } else {
-               mutex_lock(&fb_helper->dev->struct_mutex);
+               mutex_lock(&dev->struct_mutex);
                intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
-               mutex_unlock(&fb_helper->dev->struct_mutex);
+               mutex_unlock(&dev->struct_mutex);
        }
 }
index f4f3fcc8b3becb59c0ed5d6bdd5dd27a177f3731..97ba6c8cf907862197a42bbd9935030df0e16856 100644 (file)
@@ -4892,7 +4892,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
                else
                        gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
                dev_priv->rps.last_adj = 0;
-               I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+               I915_WRITE(GEN6_PMINTRMSK,
+                          gen6_sanitize_rps_pm_mask(dev_priv, ~0));
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 
index 528bdeffb339ecaaa887c97887dc80427a153c77..6190035edfeaa2af9ae30063bad3f73aca822f7d 100644 (file)
@@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
 out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
@@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
index ffdad81ef9647cbc96612e3d0dc4d41b85009dff..0c00e192c8458406e11783d1bfb674e18747a811 100644 (file)
@@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
index 4de3ff0dbebd8cba5bf4a761ae9d97bb37f86c00..e03004f4588deb2bb360fa78b9d0540b6eb43ca4 100644 (file)
@@ -125,6 +125,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
 
        /* Link drm_bridge to encoder */
        bridge->encoder = encoder;
+       encoder->bridge = bridge;
 
        ret = drm_bridge_attach(rcdu->ddev, bridge);
        if (ret) {
index 4054d804fe068f5821d49d675549d40a7ea23fb6..42c074a9c9551571c2dc782c05c5b568f8d0a789 100644 (file)
@@ -354,7 +354,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+               ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
+                                     mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
index 2df602a35f9291ce178a6634ad9d0531ff331df4..f157a9efd220864858489ee8e8c4fe71ba68900c 100644 (file)
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict,
+                   bool evict, bool interruptible,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
@@ -53,6 +53,14 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
        int ret;
 
        if (old_mem->mem_type != TTM_PL_SYSTEM) {
+               ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+
+               if (unlikely(ret != 0)) {
+                       if (ret != -ERESTARTSYS)
+                               pr_err("Failed to expire sync object before unbinding TTM\n");
+                       return ret;
+               }
+
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
index 4387ccb79e642c34f8d3b6c2a39ba4daae15e325..7410c6d9a34db942afd5624ebf54aadaffe9eb94 100644 (file)
@@ -69,5 +69,6 @@ OBJCOPYFLAGS :=
 OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
                        --set-section-flags .text=alloc,readonly \
                        --rename-section .text=.rodata
-$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
+targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
        $(call if_changed,objcopy)
index 5a3fd76eec27b226ec4a9a9dd9655d0a2ae95759..5525a204db93aadb15a2691ede56f915857845e4 100644 (file)
@@ -49,7 +49,7 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 
        /* This is a pointer to outside our current stack frame. */
        if (bad_frame) {
-               bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
+               bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
        } else {
                /* Put start address just inside stack. */
                bad_stack = task_stack_page(current) + THREAD_SIZE;
index d2bc092defd77732f780acb531931f165d440498..da2fe18162e1196b932eb42baeea1ad47f411ee3 100644 (file)
@@ -110,8 +110,8 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
        /* BIOS error detected */
        { KE_IGNORE, 0xe00d, { KEY_RESERVED } },
 
-       /* Unknown, defined in ACPI DSDT */
-       /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */
+       /* Battery was removed or inserted */
+       { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
 
        /* Wifi Catcher */
        { KE_KEY,    0xe011, { KEY_PROG2 } },
index cecc15a880de6928fed5ee0d35588fbae03423dd..3fa17ac8df5492f4d2e4681d1a4f07f2f8defffa 100644 (file)
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch)
 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
                                           long timeout)
 {
-       struct rio_channel *ch = NULL;
-       struct rio_channel *new_ch = NULL;
+       struct rio_channel *ch;
+       struct rio_channel *new_ch;
        struct conn_req *req;
        struct cm_peer *peer;
        int found = 0;
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
        spin_unlock_bh(&ch->lock);
        riocm_put_channel(ch);
+       ch = NULL;
        kfree(req);
 
        down_read(&rdev_sem);
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
        if (!found) {
                /* If peer device object not found, simply ignore the request */
                err = -ENODEV;
-               goto err_nodev;
+               goto err_put_new_ch;
        }
 
        new_ch->rdev = peer->rdev;
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
        *new_ch_id = new_ch->id;
        return new_ch;
+
+err_put_new_ch:
+       spin_lock_bh(&idr_lock);
+       idr_remove(&ch_idr, new_ch->id);
+       spin_unlock_bh(&idr_lock);
+       riocm_put_channel(new_ch);
+
 err_put:
-       riocm_put_channel(ch);
-err_nodev:
-       if (new_ch) {
-               spin_lock_bh(&idr_lock);
-               idr_remove(&ch_idr, new_ch->id);
-               spin_unlock_bh(&idr_lock);
-               riocm_put_channel(new_ch);
-       }
+       if (ch)
+               riocm_put_channel(ch);
        *new_ch_id = 0;
        return ERR_PTR(err);
 }
index bf85974be8621e16a130ffd711e18697231da3da..17d04c702e1ba13a1642d36a1de98fb7831665eb 100644 (file)
@@ -10410,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
                __ipr_remove(pdev);
                return rc;
        }
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       ioa_cfg->scan_enabled = 1;
+       schedule_work(&ioa_cfg->work_q);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 
-       scsi_scan_host(ioa_cfg->host);
        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
@@ -10421,10 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
                }
        }
 
-       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-       ioa_cfg->scan_enabled = 1;
-       schedule_work(&ioa_cfg->work_q);
-       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+       scsi_scan_host(ioa_cfg->host);
+
        return 0;
 }
 
index b6d210e7a993fd67634b3523aa3e61a1121d31bd..d9ddcfc18c91f8acd0ac977171116e3659aa2242 100644 (file)
@@ -862,33 +862,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
-                                    struct btrfs_trans_handle *trans,
-                                    u64 ref_root, u64 bytenr, u64 num_bytes)
-{
-       struct btrfs_delayed_ref_root *delayed_refs;
-       struct btrfs_delayed_ref_head *ref_head;
-       int ret = 0;
-
-       if (!fs_info->quota_enabled || !is_fstree(ref_root))
-               return 0;
-
-       delayed_refs = &trans->transaction->delayed_refs;
-
-       spin_lock(&delayed_refs->lock);
-       ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
-       if (!ref_head) {
-               ret = -ENOENT;
-               goto out;
-       }
-       WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
-       ref_head->qgroup_ref_root = ref_root;
-       ref_head->qgroup_reserved = num_bytes;
-out:
-       spin_unlock(&delayed_refs->lock);
-       return ret;
-}
-
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
index 5fca9534a2712b0b4dec9e9b15a1e024f272bb2f..43f3629760e90f186730842b0b1c609f799ae256 100644 (file)
@@ -250,9 +250,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
-                                    struct btrfs_trans_handle *trans,
-                                    u64 ref_root, u64 bytenr, u64 num_bytes);
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
index 9404121fd5f7b44f165c6f76c856548cf5722aff..5842423f8f47b6a7146240c6b47ea1ead637f984 100644 (file)
@@ -2033,6 +2033,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                 */
                clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                          &BTRFS_I(inode)->runtime_flags);
+               /*
+                * An ordered extent might have started before and completed
+                * already with io errors, in which case the inode was not
+                * updated and we end up here. So check the inode's mapping
+                * flags for any errors that might have happened while doing
+                * writeback of file data.
+                */
+               ret = btrfs_inode_check_errors(inode);
                inode_unlock(inode);
                goto out;
        }
index 2f5975954ccf198737e07b29c8706024114a78ae..08dfc57e22705363f1159def79316263a9f4293a 100644 (file)
@@ -3435,10 +3435,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                found_key.offset = 0;
                inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
                ret = PTR_ERR_OR_ZERO(inode);
-               if (ret && ret != -ESTALE)
+               if (ret && ret != -ENOENT)
                        goto out;
 
-               if (ret == -ESTALE && root == root->fs_info->tree_root) {
+               if (ret == -ENOENT && root == root->fs_info->tree_root) {
                        struct btrfs_root *dead_root;
                        struct btrfs_fs_info *fs_info = root->fs_info;
                        int is_dead_root = 0;
@@ -3474,7 +3474,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                 * Inode is already gone but the orphan item is still there,
                 * kill the orphan item.
                 */
-               if (ret == -ESTALE) {
+               if (ret == -ENOENT) {
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
@@ -3633,7 +3633,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
 /*
  * read an inode from the btree into the in-memory inode
  */
-static void btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode)
 {
        struct btrfs_path *path;
        struct extent_buffer *leaf;
@@ -3652,14 +3652,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
                filled = true;
 
        path = btrfs_alloc_path();
-       if (!path)
+       if (!path) {
+               ret = -ENOMEM;
                goto make_bad;
+       }
 
        memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
        ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
-       if (ret)
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
                goto make_bad;
+       }
 
        leaf = path->nodes[0];
 
@@ -3812,11 +3817,12 @@ cache_acl:
        }
 
        btrfs_update_iflags(inode);
-       return;
+       return 0;
 
 make_bad:
        btrfs_free_path(path);
        make_bad_inode(inode);
+       return ret;
 }
 
 /*
@@ -4204,6 +4210,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        int err = 0;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_trans_handle *trans;
+       u64 last_unlink_trans;
 
        if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
                return -ENOTEMPTY;
@@ -4226,11 +4233,27 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (err)
                goto out;
 
+       last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+
        /* now the directory is empty */
        err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
                                 dentry->d_name.name, dentry->d_name.len);
-       if (!err)
+       if (!err) {
                btrfs_i_size_write(inode, 0);
+               /*
+                * Propagate the last_unlink_trans value of the deleted dir to
+                * its parent directory. This prevents an unrecoverable log
+                * tree in case we do something like this:
+                * 1) create dir foo
+                * 2) create snapshot under dir foo
+                * 3) delete the snapshot
+                * 4) rmdir foo
+                * 5) mkdir foo
+                * 6) fsync foo or some file inside foo
+                */
+               if (last_unlink_trans >= trans->transid)
+                       BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
+       }
 out:
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root);
@@ -5606,7 +5629,9 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                return ERR_PTR(-ENOMEM);
 
        if (inode->i_state & I_NEW) {
-               btrfs_read_locked_inode(inode);
+               int ret;
+
+               ret = btrfs_read_locked_inode(inode);
                if (!is_bad_inode(inode)) {
                        inode_tree_add(inode);
                        unlock_new_inode(inode);
@@ -5615,7 +5640,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                } else {
                        unlock_new_inode(inode);
                        iput(inode);
-                       inode = ERR_PTR(-ESTALE);
+                       ASSERT(ret < 0);
+                       inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
                }
        }
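
With btrfs_read_locked_inode() now returning an errno, btrfs_iget() propagates
the real failure instead of a blanket -ESTALE. A minimal sketch of the
caller-side pattern (a hypothetical helper; names are illustrative, not from
this series):

    static int example_open_inode(struct super_block *sb, struct btrfs_key *key,
                                  struct btrfs_root *root)
    {
            struct inode *inode = btrfs_iget(sb, key, root, NULL);
            int ret = PTR_ERR_OR_ZERO(inode);

            if (ret == -ENOENT)
                    return 0;       /* inode item is gone: e.g. drop a stale orphan item */
            if (ret)
                    return ret;     /* real failures (-ENOMEM, ...) now propagate */
            iput(inode);
            return 0;
    }
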
 
index b71dd298385c1b5cfb3c00761db0c8ee674e01e7..efe129fe26788c1078d737a2bc238373c2efcbac 100644 (file)
@@ -231,7 +231,6 @@ struct pending_dir_move {
        u64 parent_ino;
        u64 ino;
        u64 gen;
-       bool is_orphan;
        struct list_head update_refs;
 };
 
@@ -274,6 +273,39 @@ struct name_cache_entry {
        char name[];
 };
 
+static void inconsistent_snapshot_error(struct send_ctx *sctx,
+                                       enum btrfs_compare_tree_result result,
+                                       const char *what)
+{
+       const char *result_string;
+
+       switch (result) {
+       case BTRFS_COMPARE_TREE_NEW:
+               result_string = "new";
+               break;
+       case BTRFS_COMPARE_TREE_DELETED:
+               result_string = "deleted";
+               break;
+       case BTRFS_COMPARE_TREE_CHANGED:
+               result_string = "updated";
+               break;
+       case BTRFS_COMPARE_TREE_SAME:
+               ASSERT(0);
+               result_string = "unchanged";
+               break;
+       default:
+               ASSERT(0);
+               result_string = "unexpected";
+       }
+
+       btrfs_err(sctx->send_root->fs_info,
+                 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
+                 result_string, what, sctx->cmp_key->objectid,
+                 sctx->send_root->root_key.objectid,
+                 (sctx->parent_root ?
+                  sctx->parent_root->root_key.objectid : 0));
+}
+
 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
 
 static struct waiting_dir_move *
@@ -1861,7 +1893,8 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
         * was already unlinked/moved, so we can safely assume that we will not
         * overwrite anything at this point in time.
         */
-       if (other_inode > sctx->send_progress) {
+       if (other_inode > sctx->send_progress ||
+           is_waiting_for_move(sctx, other_inode)) {
                ret = get_inode_info(sctx->parent_root, other_inode, NULL,
                                who_gen, NULL, NULL, NULL, NULL);
                if (ret < 0)
@@ -2502,6 +2535,8 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
+       if (ret > 0)
+               ret = -ENOENT;
        if (ret < 0)
                goto out;
 
@@ -2947,6 +2982,10 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
                }
 
                if (loc.objectid > send_progress) {
+                       struct orphan_dir_info *odi;
+
+                       odi = get_orphan_dir_info(sctx, dir);
+                       free_orphan_dir_info(sctx, odi);
                        ret = 0;
                        goto out;
                }
@@ -3047,7 +3086,6 @@ static int add_pending_dir_move(struct send_ctx *sctx,
        pm->parent_ino = parent_ino;
        pm->ino = ino;
        pm->gen = ino_gen;
-       pm->is_orphan = is_orphan;
        INIT_LIST_HEAD(&pm->list);
        INIT_LIST_HEAD(&pm->update_refs);
        RB_CLEAR_NODE(&pm->node);
@@ -3113,6 +3151,48 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
        return NULL;
 }
 
+static int path_loop(struct send_ctx *sctx, struct fs_path *name,
+                    u64 ino, u64 gen, u64 *ancestor_ino)
+{
+       int ret = 0;
+       u64 parent_inode = 0;
+       u64 parent_gen = 0;
+       u64 start_ino = ino;
+
+       *ancestor_ino = 0;
+       while (ino != BTRFS_FIRST_FREE_OBJECTID) {
+               fs_path_reset(name);
+
+               if (is_waiting_for_rm(sctx, ino))
+                       break;
+               if (is_waiting_for_move(sctx, ino)) {
+                       if (*ancestor_ino == 0)
+                               *ancestor_ino = ino;
+                       ret = get_first_ref(sctx->parent_root, ino,
+                                           &parent_inode, &parent_gen, name);
+               } else {
+                       ret = __get_cur_name_and_parent(sctx, ino, gen,
+                                                       &parent_inode,
+                                                       &parent_gen, name);
+                       if (ret > 0) {
+                               ret = 0;
+                               break;
+                       }
+               }
+               if (ret < 0)
+                       break;
+               if (parent_inode == start_ino) {
+                       ret = 1;
+                       if (*ancestor_ino == 0)
+                               *ancestor_ino = ino;
+                       break;
+               }
+               ino = parent_inode;
+               gen = parent_gen;
+       }
+       return ret;
+}
+
 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
 {
        struct fs_path *from_path = NULL;
@@ -3123,6 +3203,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        u64 parent_ino, parent_gen;
        struct waiting_dir_move *dm = NULL;
        u64 rmdir_ino = 0;
+       u64 ancestor;
+       bool is_orphan;
        int ret;
 
        name = fs_path_alloc();
@@ -3135,9 +3217,10 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        dm = get_waiting_dir_move(sctx, pm->ino);
        ASSERT(dm);
        rmdir_ino = dm->rmdir_ino;
+       is_orphan = dm->orphanized;
        free_waiting_dir_move(sctx, dm);
 
-       if (pm->is_orphan) {
+       if (is_orphan) {
                ret = gen_unique_name(sctx, pm->ino,
                                      pm->gen, from_path);
        } else {
@@ -3155,6 +3238,24 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                goto out;
 
        sctx->send_progress = sctx->cur_ino + 1;
+       ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
+       if (ret < 0)
+               goto out;
+       if (ret) {
+               LIST_HEAD(deleted_refs);
+               ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
+               ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
+                                          &pm->update_refs, &deleted_refs,
+                                          is_orphan);
+               if (ret < 0)
+                       goto out;
+               if (rmdir_ino) {
+                       dm = get_waiting_dir_move(sctx, pm->ino);
+                       ASSERT(dm);
+                       dm->rmdir_ino = rmdir_ino;
+               }
+               goto out;
+       }
        fs_path_reset(name);
        to_path = name;
        name = NULL;
@@ -3174,7 +3275,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                        /* already deleted */
                        goto finish;
                }
-               ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
+               ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
                if (ret < 0)
                        goto out;
                if (!ret)
@@ -3204,8 +3305,18 @@ finish:
         * and old parent(s).
         */
        list_for_each_entry(cur, &pm->update_refs, list) {
-               if (cur->dir == rmdir_ino)
+               /*
+                * The parent inode might have been deleted in the send snapshot
+                */
+               ret = get_inode_info(sctx->send_root, cur->dir, NULL,
+                                    NULL, NULL, NULL, NULL, NULL);
+               if (ret == -ENOENT) {
+                       ret = 0;
                        continue;
+               }
+               if (ret < 0)
+                       goto out;
+
                ret = send_utimes(sctx, cur->dir, cur->dir_gen);
                if (ret < 0)
                        goto out;
@@ -3325,6 +3436,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
        u64 left_gen;
        u64 right_gen;
        int ret = 0;
+       struct waiting_dir_move *wdm;
 
        if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
                return 0;
@@ -3383,7 +3495,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
                goto out;
        }
 
-       if (is_waiting_for_move(sctx, di_key.objectid)) {
+       wdm = get_waiting_dir_move(sctx, di_key.objectid);
+       if (wdm && !wdm->orphanized) {
                ret = add_pending_dir_move(sctx,
                                           sctx->cur_ino,
                                           sctx->cur_inode_gen,
@@ -3470,7 +3583,8 @@ static int wait_for_parent_move(struct send_ctx *sctx,
                        ret = is_ancestor(sctx->parent_root,
                                          sctx->cur_ino, sctx->cur_inode_gen,
                                          ino, path_before);
-                       break;
+                       if (ret)
+                               break;
                }
 
                fs_path_reset(path_before);
@@ -3643,11 +3757,26 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                                goto out;
                        if (ret) {
                                struct name_cache_entry *nce;
+                               struct waiting_dir_move *wdm;
 
                                ret = orphanize_inode(sctx, ow_inode, ow_gen,
                                                cur->full_path);
                                if (ret < 0)
                                        goto out;
+
+                               /*
+                                * If ow_inode has its rename operation delayed,
+                                * make sure that its orphanized name is used in
+                                * the source path when performing its rename
+                                * operation.
+                                */
+                               if (is_waiting_for_move(sctx, ow_inode)) {
+                                       wdm = get_waiting_dir_move(sctx,
+                                                                  ow_inode);
+                                       ASSERT(wdm);
+                                       wdm->orphanized = true;
+                               }
+
                                /*
                                 * Make sure we clear our orphanized inode's
                                 * name from the name cache. This is because the
@@ -3663,6 +3792,19 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                                        name_cache_delete(sctx, nce);
                                        kfree(nce);
                                }
+
+                               /*
+                                * ow_inode might currently be an ancestor of
+                                * cur_ino, therefore compute valid_path (the
+                                * current path of cur_ino) again because it
+                                * might contain the pre-orphanization name of
+                                * ow_inode, which is no longer valid.
+                                */
+                               fs_path_reset(valid_path);
+                               ret = get_cur_path(sctx, sctx->cur_ino,
+                                          sctx->cur_inode_gen, valid_path);
+                               if (ret < 0)
+                                       goto out;
                        } else {
                                ret = send_unlink(sctx, cur->full_path);
                                if (ret < 0)
@@ -5602,7 +5744,10 @@ static int changed_ref(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "reference");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen &&
            sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
@@ -5627,7 +5772,10 @@ static int changed_xattr(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "xattr");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result == BTRFS_COMPARE_TREE_NEW)
@@ -5651,7 +5799,10 @@ static int changed_extent(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "extent");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result != BTRFS_COMPARE_TREE_DELETED)
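
With the BUG_ON()s above replaced, a mismatched pair of snapshots now fails
the send ioctl with -EIO and logs a message built from the
inconsistent_snapshot_error() format string. For the changed_ref() case it
would look roughly like this (device name and root/inode numbers are
illustrative):

    BTRFS error (device sda1): Send: inconsistent snapshot, found new reference for inode 257 without updated inode item, send root is 258, parent root is 5
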
index d31a0c4f56bed436e0eb933cceb592fdc498eb53..fff3f3efa43602e0c04f9e5e019bf0e85a239d6f 100644 (file)
@@ -4469,7 +4469,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
                                         const int slot,
                                         const struct btrfs_key *key,
-                                        struct inode *inode)
+                                        struct inode *inode,
+                                        u64 *other_ino)
 {
        int ret;
        struct btrfs_path *search_path;
@@ -4528,7 +4529,16 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
                                           search_path, parent,
                                           name, this_name_len, 0);
                if (di && !IS_ERR(di)) {
-                       ret = 1;
+                       struct btrfs_key di_key;
+
+                       btrfs_dir_item_key_to_cpu(search_path->nodes[0],
+                                                 di, &di_key);
+                       if (di_key.type == BTRFS_INODE_ITEM_KEY) {
+                               ret = 1;
+                               *other_ino = di_key.objectid;
+                       } else {
+                               ret = -EAGAIN;
+                       }
                        goto out;
                } else if (IS_ERR(di)) {
                        ret = PTR_ERR(di);
@@ -4722,16 +4732,71 @@ again:
                if ((min_key.type == BTRFS_INODE_REF_KEY ||
                     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
                    BTRFS_I(inode)->generation == trans->transid) {
+                       u64 other_ino = 0;
+
                        ret = btrfs_check_ref_name_override(path->nodes[0],
                                                            path->slots[0],
-                                                           &min_key, inode);
+                                                           &min_key, inode,
+                                                           &other_ino);
                        if (ret < 0) {
                                err = ret;
                                goto out_unlock;
                        } else if (ret > 0) {
-                               err = 1;
-                               btrfs_set_log_full_commit(root->fs_info, trans);
-                               goto out_unlock;
+                               struct btrfs_key inode_key;
+                               struct inode *other_inode;
+
+                               if (ins_nr > 0) {
+                                       ins_nr++;
+                               } else {
+                                       ins_nr = 1;
+                                       ins_start_slot = path->slots[0];
+                               }
+                               ret = copy_items(trans, inode, dst_path, path,
+                                                &last_extent, ins_start_slot,
+                                                ins_nr, inode_only,
+                                                logged_isize);
+                               if (ret < 0) {
+                                       err = ret;
+                                       goto out_unlock;
+                               }
+                               ins_nr = 0;
+                               btrfs_release_path(path);
+                               inode_key.objectid = other_ino;
+                               inode_key.type = BTRFS_INODE_ITEM_KEY;
+                               inode_key.offset = 0;
+                               other_inode = btrfs_iget(root->fs_info->sb,
+                                                        &inode_key, root,
+                                                        NULL);
+                               /*
+                                * If the other inode that had a conflicting dir
+                                * entry was deleted in the current transaction,
+                                * we don't need to do more work nor fall back to
+                                * a transaction commit.
+                                */
+                               if (IS_ERR(other_inode) &&
+                                   PTR_ERR(other_inode) == -ENOENT) {
+                                       goto next_key;
+                               } else if (IS_ERR(other_inode)) {
+                                       err = PTR_ERR(other_inode);
+                                       goto out_unlock;
+                               }
+                               /*
+                                * We are safe logging the other inode without
+                                * acquiring its i_mutex as long as we log with
+                                * the LOG_INODE_EXISTS mode. We're safe against
+                                * concurrent renames of the other inode as well
+                                * because during a rename we pin the log and
+                                * update the log with the new name before we
+                                * unpin it.
+                                */
+                               err = btrfs_log_inode(trans, root, other_inode,
+                                                     LOG_INODE_EXISTS,
+                                                     0, LLONG_MAX, ctx);
+                               iput(other_inode);
+                               if (err)
+                                       goto out_unlock;
+                               else
+                                       goto next_key;
                        }
                }
 
@@ -4799,7 +4864,7 @@ next_slot:
                        ins_nr = 0;
                }
                btrfs_release_path(path);
-
+next_key:
                if (min_key.offset < (u64)-1) {
                        min_key.offset++;
                } else if (min_key.type < max_key.type) {
@@ -4993,8 +5058,12 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
                if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
                        break;
 
-               if (IS_ROOT(parent))
+               if (IS_ROOT(parent)) {
+                       inode = d_inode(parent);
+                       if (btrfs_must_commit_transaction(trans, inode))
+                               ret = 1;
                        break;
+               }
 
                parent = dget_parent(parent);
                dput(old_parent);
index 99115cae1652ac1661d37a5a286da7180b6d9d94..16e6ded0b7f281bf72e8074b9d2896713fe4aef2 100644 (file)
@@ -1347,9 +1347,12 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
-       struct ceph_mds_session *session = *psession;
+       struct ceph_mds_session *session = NULL;
        int mds;
+
        dout("ceph_flush_snaps %p\n", inode);
+       if (psession)
+               session = *psession;
 retry:
        spin_lock(&ci->i_ceph_lock);
        if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
index fa59a85226b262f2fe086ec5dfc1bf6813711986..f72d4ae303b273a98ee2631d8ed3dde21a71e796 100644 (file)
@@ -2759,6 +2759,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        } else {
                path = NULL;
                pathlen = 0;
+               pathbase = 0;
        }
 
        spin_lock(&ci->i_ceph_lock);
index 4b32928f542661f5e04730ca8c8cb134e59ea531..4ebe6b2e5217c2e26c7185bcf4254d8af3b55872 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -144,10 +144,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
        struct page *page = buf->page;
 
        if (page_count(page) == 1) {
-               if (memcg_kmem_enabled()) {
+               if (memcg_kmem_enabled())
                        memcg_kmem_uncharge(page, 0);
-                       __ClearPageKmemcg(page);
-               }
                __SetPageLocked(page);
                return 0;
        }
index 4348d6d5877a213b95c4b4e546287c9032b44a1a..99c6d01d24f2439f2c8134d546bf287c2da0496e 100644 (file)
@@ -962,6 +962,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -976,7 +977,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait_gpu,
+                          bool evict, bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem);
 
 /**
index f2e4e90621ec25c237703bc5b1ecba814d1afce9..d572b78b65e14709d5fb8d8a60ee9d71147d0d81 100644 (file)
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
 
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
 #else
 #  define is_migrate_cma(migratetype) false
+#  define is_migrate_cma_page(_page) false
 #endif
 
 #define for_each_migratetype_order(order, type) \
index 8dc155dab3ed4950ef7f2ce6bfb737b3617b0e68..696a56be7d3e2ba0f54d92ac059941470b228ccc 100644 (file)
@@ -266,39 +266,21 @@ extern asmlinkage void dump_stack(void) __cold;
  * and other debug macros are compiled out unless either DEBUG is defined
  * or CONFIG_DYNAMIC_DEBUG is set.
  */
-
-#ifdef CONFIG_PRINTK
-
-asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...);
-
-#define pr_emerg(fmt, ...)     __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)     __pr_alert(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)      __pr_crit(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)       __pr_err(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)      __pr_warn(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)    __pr_notice(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)      __pr_info(pr_fmt(fmt), ##__VA_ARGS__)
-
-#else
-
-#define pr_emerg(fmt, ...)     printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)     printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)      printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)       printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)      printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)    printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)      printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-#endif
-
-#define pr_warning pr_warn
-
+#define pr_emerg(fmt, ...) \
+       printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+       printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+       printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+       printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+       printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_notice(fmt, ...) \
+       printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+       printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 /*
  * Like KERN_CONT, pr_cont() should only be used when continuing
  * a line with no newline ('\n') enclosed. Otherwise it defaults
index 1a4ea551aae51c08fd70e4dd4693e97ca56f01de..4293808d8cfb5d4a473f220735e53da87390998b 100644 (file)
@@ -155,6 +155,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+                                             unsigned long n,
+                                             struct page *page)
+{
+       return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
index 352b1542f5cc21953c037f499d9b48f03ee98314..cbd8990e2e77e6ffa49f45611eb6710980fddd23 100644 (file)
@@ -105,6 +105,30 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 
 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
 
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+                                       bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+                                    bool to_user)
+{
+       __check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+                                    bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
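
check_object_size() is intended to be wired into the arch uaccess fast paths
(the arch/*/include/asm/uaccess.h entries in this merge do exactly that). A
hedged sketch of such a call site, with the arch-specific copy primitive left
hypothetical:

    static inline unsigned long
    example_copy_to_user(void __user *to, const void *from, unsigned long n)
    {
            /* Validate the kernel-side object before exposing it; this
             * compiles to nothing when CONFIG_HARDENED_USERCOPY is off. */
            check_object_size(from, n, true);
            return arch_raw_copy_to_user(to, from, n);  /* hypothetical arch primitive */
    }
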
index 349557825428e9b3d815e0bc2781d5d79b172d47..f30c187ed785366231e318a7beab151e0bba64b6 100644 (file)
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin() do { } while (0)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
 #endif
 
 #endif         /* __LINUX_UACCESS_H__ */
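
The extra err-label argument turns each access into a branch to a local error
label instead of a tested return value; lib/strncpy_from_user.c and
lib/strnlen_user.c below are converted to exactly this pattern. A minimal
sketch (access_ok() and arch details elided):

    static long example_read_word(const unsigned long __user *src,
                                  unsigned long *dst)
    {
            unsigned long c;

            user_access_begin();
            unsafe_get_user(c, src, efault);        /* jumps to efault on fault */
            user_access_end();
            *dst = c;
            return 0;

    efault:
            user_access_end();
            return -EFAULT;
    }
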
index 51440131d3372feb1f09b7139362db66119f0e2a..28c5da6fdfac762e64f3d7ae26edcc9adbefe9ab 100644 (file)
@@ -330,24 +330,32 @@ TRACE_EVENT(itimer_expire,
 #ifdef CONFIG_NO_HZ_COMMON
 
 #define TICK_DEP_NAMES                                 \
-               tick_dep_name(NONE)                     \
+               tick_dep_mask_name(NONE)                \
                tick_dep_name(POSIX_TIMER)              \
                tick_dep_name(PERF_EVENTS)              \
                tick_dep_name(SCHED)                    \
                tick_dep_name_end(CLOCK_UNSTABLE)
 
 #undef tick_dep_name
+#undef tick_dep_mask_name
 #undef tick_dep_name_end
 
-#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
-#define tick_dep_name_end(sdep)  TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* The MASK values are built from the BIT values, so both need to be processed */
+#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+       TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+#define tick_dep_name_end(sdep)  TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+       TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* NONE only has a mask defined for it */
+#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
 
 TICK_DEP_NAMES
 
 #undef tick_dep_name
+#undef tick_dep_mask_name
 #undef tick_dep_name_end
 
 #define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
+#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
 #define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
 
 #define show_tick_dep_name(val)                                \
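
For reference, the first (TRACE_DEFINE_ENUM) pass over TICK_DEP_NAMES now
expands to both the BIT and MASK symbols, schematically:

    TRACE_DEFINE_ENUM(TICK_DEP_MASK_NONE);
    TRACE_DEFINE_ENUM(TICK_DEP_BIT_POSIX_TIMER);
    TRACE_DEFINE_ENUM(TICK_DEP_MASK_POSIX_TIMER);
    TRACE_DEFINE_ENUM(TICK_DEP_BIT_PERF_EVENTS);
    TRACE_DEFINE_ENUM(TICK_DEP_MASK_PERF_EVENTS);
    TRACE_DEFINE_ENUM(TICK_DEP_BIT_SCHED);
    TRACE_DEFINE_ENUM(TICK_DEP_MASK_SCHED);
    TRACE_DEFINE_ENUM(TICK_DEP_BIT_CLOCK_UNSTABLE);
    TRACE_DEFINE_ENUM(TICK_DEP_MASK_CLOCK_UNSTABLE);

so user-space tools can resolve both forms in the trace format.
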
index 69886493ff1e3d31daa02d909447c744352b38ff..cac3f096050d5b27a9bd5de8600469f3c5aa0ada 100644 (file)
@@ -1761,6 +1761,7 @@ choice
 
 config SLAB
        bool "SLAB"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
          The regular slab allocator that is established and known to work
          well in all environments. It organizes cache hot objects in
@@ -1768,6 +1769,7 @@ config SLAB
 
 config SLUB
        bool "SLUB (Unqueued Allocator)"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
           SLUB is a slab allocator that minimizes cache line usage
           instead of managing queues of cached objects (SLAB approach).
index 5d4505f30083550e59337fe5d90cf14f783573c0..7fd2838fa41748006cebf14ed7432739f3138301 100644 (file)
  */
 #include <linux/percpu.h>
 
-typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt,
-                                           va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
 
-__printf(2, 0)
-int vprintk_default(int level, const char *fmt, va_list args);
+int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
 
 #ifdef CONFIG_PRINTK_NMI
 
@@ -33,10 +31,9 @@ extern raw_spinlock_t logbuf_lock;
  * via per-CPU variable.
  */
 DECLARE_PER_CPU(printk_func_t, printk_func);
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-       return this_cpu_read(printk_func)(level, fmt, args);
+       return this_cpu_read(printk_func)(fmt, args);
 }
 
 extern atomic_t nmi_message_lost;
@@ -47,10 +44,9 @@ static inline int get_nmi_message_lost(void)
 
 #else /* CONFIG_PRINTK_NMI */
 
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-       return vprintk_default(level, fmt, args);
+       return vprintk_default(fmt, args);
 }
 
 static inline int get_nmi_message_lost(void)
index bc3eeb1ae6da6b58a1aed09372f70fc8c3fda088..b69eb8a2876fc2ec7ba1af9e5cbc000737cd47fd 100644 (file)
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
  * one writer running. But the buffer might get flushed from another
  * CPU, so we need to be careful.
  */
-static int vprintk_nmi(int level, const char *fmt, va_list args)
+static int vprintk_nmi(const char *fmt, va_list args)
 {
        struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
        int add = 0;
@@ -79,16 +79,7 @@ again:
        if (!len)
                smp_rmb();
 
-       if (level != LOGLEVEL_DEFAULT) {
-               add = snprintf(s->buffer + len, sizeof(s->buffer) - len,
-                               KERN_SOH "%c", '0' + level);
-               add += vsnprintf(s->buffer + len + add,
-                                sizeof(s->buffer) - len - add,
-                                fmt, args);
-       } else {
-               add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len,
-                               fmt, args);
-       }
+       add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
 
        /*
         * Do it once again if the buffer has been flushed in the meantime.
index a5ef95ca18c9d945e7d8a826dacd27c7f7ebe008..eea6dbc2d8cf6ffc4d71729eb270a659b0a8c77d 100644 (file)
@@ -1930,28 +1930,7 @@ asmlinkage int printk_emit(int facility, int level,
 }
 EXPORT_SYMBOL(printk_emit);
 
-#ifdef CONFIG_PRINTK
-#define define_pr_level(func, loglevel)                                \
-asmlinkage __visible void func(const char *fmt, ...)           \
-{                                                              \
-       va_list args;                                           \
-                                                               \
-       va_start(args, fmt);                                    \
-       vprintk_default(loglevel, fmt, args);                   \
-       va_end(args);                                           \
-}                                                              \
-EXPORT_SYMBOL(func)
-
-define_pr_level(__pr_emerg, LOGLEVEL_EMERG);
-define_pr_level(__pr_alert, LOGLEVEL_ALERT);
-define_pr_level(__pr_crit, LOGLEVEL_CRIT);
-define_pr_level(__pr_err, LOGLEVEL_ERR);
-define_pr_level(__pr_warn, LOGLEVEL_WARNING);
-define_pr_level(__pr_notice, LOGLEVEL_NOTICE);
-define_pr_level(__pr_info, LOGLEVEL_INFO);
-#endif
-
-int vprintk_default(int level, const char *fmt, va_list args)
+int vprintk_default(const char *fmt, va_list args)
 {
        int r;
 
@@ -1961,7 +1940,7 @@ int vprintk_default(int level, const char *fmt, va_list args)
                return r;
        }
 #endif
-       r = vprintk_emit(0, level, NULL, 0, fmt, args);
+       r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
 
        return r;
 }
@@ -1994,7 +1973,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
        int r;
 
        va_start(args, fmt);
-       r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args);
+       r = vprintk_func(fmt, args);
        va_end(args);
 
        return r;
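
After this revert the level once again travels inside the format string, and
vprintk_emit() parses the leading KERN_SOH byte; schematically (a sketch, not
new code in this series):

    pr_err("disk %d offline\n", 3);
    /* expands to: printk(KERN_ERR pr_fmt("disk %d offline\n"), 3)
     *   -> vprintk_func(fmt, args)        NMI-safe dispatch
     *   -> vprintk_default(fmt, args)
     *   -> vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args)
     * where the KERN_ERR prefix embedded in fmt supplies the level. */
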
index 33f655ef48cd1668604a1f60c4a97e4acbf52508..9c5fe81104135364bca9f2b0da47f4e2a1ed51fc 100644 (file)
@@ -40,8 +40,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
                unsigned long c, data;
 
                /* Fall back to byte-at-a-time if we get a page fault */
-               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                       break;
+               unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
                *(unsigned long *)(dst+res) = c;
                if (has_zero(c, &data, &constants)) {
                        data = prep_zero_mask(c, data, &constants);
@@ -56,8 +56,7 @@ byte_at_a_time:
        while (max) {
                char c;
 
-               if (unlikely(unsafe_get_user(c,src+res)))
-                       return -EFAULT;
+               unsafe_get_user(c,src+res, efault);
                dst[res] = c;
                if (!c)
                        return res;
@@ -76,6 +75,7 @@ byte_at_a_time:
         * Nope: we hit the address space limit, and we still had more
         * characters the caller would have wanted. That's an EFAULT.
         */
+efault:
        return -EFAULT;
 }
 
index 2625943625d7fb229e6e2cf104e5d84c95246ffa..8e105ed4df12bb6bb0a170afff54d979c15d73c0 100644 (file)
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        src -= align;
        max += align;
 
-       if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
-               return 0;
+       unsafe_get_user(c, (unsigned long __user *)src, efault);
        c |= aligned_byte_mask(align);
 
        for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
                if (unlikely(max <= sizeof(unsigned long)))
                        break;
                max -= sizeof(unsigned long);
-               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                       return 0;
+               unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
        }
        res -= align;
 
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
         * Nope: we hit the address space limit, and we still had more
         * characters the caller would have wanted. That's 0.
         */
+efault:
        return 0;
 }
 
index fc059666c760e179db0a070926a1e8204f5e9c92..2ca1faf3fa09038feaeea4fb4adbe5ea6717df30 100644 (file)
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
+# Since __builtin_frame_address does work as used here, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
 mmu-y                  := nommu.o
 mmu-$(CONFIG_MMU)      := gup.o highmem.o memory.o mincore.o \
                           mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
index 66beca1ad92ffcc16c3636c4e7e54c46bc89cc3f..e74d7080ec9e63681ce3145cda26d2fce6eb8ed3 100644 (file)
@@ -2337,8 +2337,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
                return 0;
 
        memcg = get_mem_cgroup_from_mm(current->mm);
-       if (!mem_cgroup_is_root(memcg))
+       if (!mem_cgroup_is_root(memcg)) {
                ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+               if (!ret)
+                       __SetPageKmemcg(page);
+       }
        css_put(&memcg->css);
        return ret;
 }
@@ -2365,6 +2368,11 @@ void memcg_kmem_uncharge(struct page *page, int order)
                page_counter_uncharge(&memcg->memsw, nr_pages);
 
        page->mem_cgroup = NULL;
+
+       /* slab pages do not have PageKmemcg flag set */
+       if (PageKmemcg(page))
+               __ClearPageKmemcg(page);
+
        css_put_many(&memcg->css, nr_pages);
 }
 #endif /* !CONFIG_SLOB */
@@ -5537,8 +5545,10 @@ static void uncharge_list(struct list_head *page_list)
                        else
                                nr_file += nr_pages;
                        pgpgout++;
-               } else
+               } else {
                        nr_kmem += 1 << compound_order(page);
+                       __ClearPageKmemcg(page);
+               }
 
                page->mem_cgroup = NULL;
        } while (next != page_list);
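
Together with the page_alloc.c and pipe.c hunks, this moves all PageKmemcg
flag handling inside the charge/uncharge helpers; a sketch of the resulting
caller-side contract (condensed from the hunks in this merge):

    /* Allocation side: the helper sets PageKmemcg itself on success. */
    page = alloc_pages(gfp_mask | __GFP_ACCOUNT, order);
    if (page && unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
            __free_pages(page, order);
            page = NULL;
    }

    /* Free side: the helper clears the flag, so callers such as
     * anon_pipe_buf_steal() no longer touch it. */
    if (memcg_kmem_enabled() && PageKmemcg(page))
            memcg_kmem_uncharge(page, order);
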
index fb975cec351821151a422fb34171121f67459228..ab2c0ff8c2e6ae4eebd1b2f715b1dcb11d593777 100644 (file)
@@ -1008,10 +1008,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
        }
        if (PageMappingFlags(page))
                page->mapping = NULL;
-       if (memcg_kmem_enabled() && PageKmemcg(page)) {
+       if (memcg_kmem_enabled() && PageKmemcg(page))
                memcg_kmem_uncharge(page, order);
-               __ClearPageKmemcg(page);
-       }
        if (check_free)
                bad += free_pages_check(page);
        if (bad)
@@ -3756,12 +3754,10 @@ no_zone:
        }
 
 out:
-       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
-               if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
-                       __free_pages(page, order);
-                       page = NULL;
-               } else
-                       __SetPageKmemcg(page);
+       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
+           unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+               __free_pages(page, order);
+               page = NULL;
        }
 
        if (kmemcheck_enabled && page)
@@ -4761,6 +4757,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else  /* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5882,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
                zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
                zone->node = nid;
-               pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-                                               / 100;
-               pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
                zone->name = zone_names[j];
                zone->zone_pgdat = pgdat;
@@ -6805,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
        setup_per_zone_wmarks();
        refresh_zone_stat_thresholds();
        setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+       setup_min_unmapped_ratio();
+       setup_min_slab_ratio();
+#endif
+
        return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6846,43 +6847,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
+static void setup_min_unmapped_ratio(void)
+{
+       pg_data_t *pgdat;
+       struct zone *zone;
+
+       for_each_online_pgdat(pgdat)
+               pgdat->min_unmapped_pages = 0;
+
+       for_each_zone(zone)
+               zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
+                               sysctl_min_unmapped_ratio) / 100;
+}
+
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
-       struct pglist_data *pgdat;
-       struct zone *zone;
        int rc;
 
        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
+       setup_min_unmapped_ratio();
+
+       return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+       pg_data_t *pgdat;
+       struct zone *zone;
+
        for_each_online_pgdat(pgdat)
                pgdat->min_slab_pages = 0;
 
        for_each_zone(zone)
-               zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
-                               sysctl_min_unmapped_ratio) / 100;
-       return 0;
+               zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
+                               sysctl_min_slab_ratio) / 100;
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
-       struct pglist_data *pgdat;
-       struct zone *zone;
        int rc;
 
        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
-       for_each_online_pgdat(pgdat)
-               pgdat->min_slab_pages = 0;
+       setup_min_slab_ratio();
 
-       for_each_zone(zone)
-               zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
-                               sysctl_min_slab_ratio) / 100;
        return 0;
 }
 #endif
index 709bc83703b1bfef419fa674ef5b5e28f5d70f05..1ef36404e7b2d7daeef2061ff8f79524d7750bb9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound)
                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
                __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
        } else {
-               if (PageTransCompound(page)) {
-                       VM_BUG_ON_PAGE(!PageLocked(page), page);
+               if (PageTransCompound(page) && page_mapping(page)) {
+                       VM_WARN_ON_ONCE(!PageLocked(page));
+
                        SetPageDoubleMap(compound_head(page));
                        if (PageMlocked(page))
                                clear_page_mlock(compound_head(page));
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 {
        int i, nr = 1;
 
-       VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+       VM_BUG_ON_PAGE(compound && !PageHead(page), page);
        lock_page_memcg(page);
 
        /* Hugepages are not counted in NR_FILE_MAPPED for now. */
index 7f7748a0f9e1f738fd1ffcaceccdee2ae54d8d35..fd8b2b5741b141a7bc4457d93929c100d988a639 100644 (file)
@@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 
 struct kobj_attribute shmem_enabled_attr =
        __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
 
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 bool shmem_huge_enabled(struct vm_area_struct *vma)
 {
        struct inode *inode = file_inode(vma->vm_file);
@@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
                        return false;
        }
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 #else /* !CONFIG_SHMEM */
 
index 261147ba156fb855c525975a0478d98096c7d0a2..b67271024135aff957f1361c3e327da41915b4c2 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4441,6 +4441,36 @@ static int __init slab_proc_init(void)
 module_init(slab_proc_init);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if the check passes; otherwise returns the name of the
+ * cache the object belongs to, to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *cachep;
+       unsigned int objnr;
+       unsigned long offset;
+
+       /* Find and validate object. */
+       cachep = page->slab_cache;
+       objnr = obj_to_index(cachep, page, (void *)ptr);
+       BUG_ON(objnr >= cachep->num);
+
+       /* Find offset within object. */
+       offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+               return NULL;
+
+       return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
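
The acceptance test at the end is the same for SLAB and SLUB: the copy must
fall entirely inside the usable object. A worked example with illustrative
numbers:

    /* object_size = 128:
     *   offset = 96, n = 32  ->  96 <= 128 && 32 <= 128 - 96  -> allowed
     *   offset = 96, n = 40  ->  40 > 128 - 96                -> rejected;
     *                            the copy would run 8 bytes past the object.
     * The subtractive form also avoids the overflow a naive
     * "offset + n <= object_size" check could hit for huge n. */
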
+
 /**
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
index 850737bdfbd82410dcd9e0e87d64ea808b0e39c7..9adae58462f8191b22659b1aa438ec637f6fc765 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
+       LIST_HEAD(discard);
        struct page *page, *h;
 
        BUG_ON(irqs_disabled());
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
        list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
                        remove_partial(n, page);
-                       discard_slab(s, page);
+                       list_add(&page->lru, &discard);
                } else {
                        list_slab_objects(s, page,
                        "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
        spin_unlock_irq(&n->list_lock);
+
+       list_for_each_entry_safe(page, h, &discard, lru)
+               discard_slab(s, page);
 }
 
 /*
@@ -3764,6 +3768,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if the check passes; otherwise returns the name of the
+ * cache the object belongs to, to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *s;
+       unsigned long offset;
+       size_t object_size;
+
+       /* Find object and usable object size. */
+       s = page->slab_cache;
+       object_size = slab_ksize(s);
+
+       /* Reject impossible pointers. */
+       if (ptr < page_address(page))
+               return s->name;
+
+       /* Find offset within object. */
+       offset = (ptr - page_address(page)) % s->size;
+
+       /* Adjust for redzone and reject if within the redzone. */
+       if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+               if (offset < s->red_left_pad)
+                       return s->name;
+               offset -= s->red_left_pad;
+       }
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= object_size && n <= object_size - offset)
+               return NULL;
+
+       return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
        struct page *page;
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644 (file)
index 0000000..8ebae91
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+       BAD_STACK = -1,
+       NOT_STACK = 0,
+       GOOD_FRAME,
+       GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ *     NOT_STACK: not at all on the stack
+ *     GOOD_FRAME: fully within a valid stack frame
+ *     GOOD_STACK: fully on the stack (when can't do frame-checking)
+ *     BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+       const void * const stack = task_stack_page(current);
+       const void * const stackend = stack + THREAD_SIZE;
+       int ret;
+
+       /* Object is not on the stack at all. */
+       if (obj + len <= stack || stackend <= obj)
+               return NOT_STACK;
+
+       /*
+        * Reject: object partially overlaps the stack (passing the
+        * the check above means at least one end is within the stack,
+        * check above means at least one end is within the stack,
+        */
+       if (obj < stack || stackend < obj + len)
+               return BAD_STACK;
+
+       /* Check if object is safely within a valid frame. */
+       ret = arch_within_stack_frames(stack, stackend, obj, len);
+       if (ret)
+               return ret;
+
+       return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+                           bool to_user, const char *type)
+{
+       pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+               to_user ? "exposure" : "overwrite",
+               to_user ? "from" : "to", ptr, type ? : "unknown", len);
+       /*
+        * For greater effect, it would be nice to do do_group_exit(),
+        * but BUG() actually hooks all the lock-breaking and per-arch
+        * Oops code, so that is used here instead.
+        */
+       BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+                    unsigned long high)
+{
+       unsigned long check_low = (uintptr_t)ptr;
+       unsigned long check_high = check_low + n;
+
+       /* Does not overlap if entirely above or entirely below. */
+       if (check_low >= high || check_high < low)
+               return false;
+
+       return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+                                                  unsigned long n)
+{
+       unsigned long textlow = (unsigned long)_stext;
+       unsigned long texthigh = (unsigned long)_etext;
+       unsigned long textlow_linear, texthigh_linear;
+
+       if (overlaps(ptr, n, textlow, texthigh))
+               return "<kernel text>";
+
+       /*
+        * Some architectures have virtual memory mappings with a secondary
+        * mapping of the kernel text, i.e. there is more than one virtual
+        * kernel address that points to the kernel image. This usually
+        * happens when there is a separate linear physical memory mapping,
+        * in which case __pa() is not just the reverse of __va(). This can
+        * be detected and checked:
+        */
+       textlow_linear = (unsigned long)__va(__pa(textlow));
+       /* No different mapping: we're done. */
+       if (textlow_linear == textlow)
+               return NULL;
+
+       /* Check the secondary mapping... */
+       texthigh_linear = (unsigned long)__va(__pa(texthigh));
+       if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+               return "<linear kernel text>";
+
+       return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+       /* Reject if object wraps past end of memory. */
+       if (ptr + n < ptr)
+               return "<wrapped address>";
+
+       /* Reject if NULL or a zero-size allocation. */
+       if (ZERO_OR_NULL_PTR(ptr))
+               return "<null>";
+
+       return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                           bool to_user)
+{
+       struct page *page, *endpage;
+       const void *end = ptr + n - 1;
+       bool is_reserved, is_cma;
+
+       /*
+        * Some architectures (arm64) return true for virt_addr_valid() on
+        * vmalloced addresses. Work around this by checking for vmalloc
+        * first.
+        */
+       if (is_vmalloc_addr(ptr))
+               return NULL;
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       /* Check slab allocator for flags and size. */
+       if (PageSlab(page))
+               return __check_heap_object(ptr, n, page);
+
+       /*
+        * Sometimes the kernel data regions are not marked Reserved (see
+        * check below). And sometimes [_sdata,_edata) does not cover
+        * rodata and/or bss, so check each range explicitly.
+        */
+
+       /* Allow reads of kernel rodata region (if not marked as Reserved). */
+       if (ptr >= (const void *)__start_rodata &&
+           end <= (const void *)__end_rodata) {
+               if (!to_user)
+                       return "<rodata>";
+               return NULL;
+       }
+
+       /* Allow kernel data region (if not marked as Reserved). */
+       if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+               return NULL;
+
+       /* Allow kernel bss region (if not marked as Reserved). */
+       if (ptr >= (const void *)__bss_start &&
+           end <= (const void *)__bss_stop)
+               return NULL;
+
+       /* Is the object wholly within one base page? */
+       if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+                  ((unsigned long)end & (unsigned long)PAGE_MASK)))
+               return NULL;
+
+       /* Allow if start and end are inside the same compound page. */
+       endpage = virt_to_head_page(end);
+       if (likely(endpage == page))
+               return NULL;
+
+       /*
+        * Reject if range is entirely either Reserved (i.e. special or
+        * device memory), or CMA. Otherwise, reject since the object spans
+        * several independently allocated pages.
+        */
+       is_reserved = PageReserved(page);
+       is_cma = is_migrate_cma_page(page);
+       if (!is_reserved && !is_cma)
+               goto reject;
+
+       for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+               page = virt_to_head_page(ptr);
+               if (is_reserved && !PageReserved(page))
+                       goto reject;
+               if (is_cma && !is_migrate_cma_page(page))
+                       goto reject;
+       }
+
+       return NULL;
+
+reject:
+       return "<spans multiple pages>";
+}
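
A hedged example of what the final check rejects (driver code hypothetical): two virtually adjacent order-0 pages are independent allocations, so a copy that walks off the first page could expose unrelated memory.

	u8 *obj = (u8 *)__get_free_page(GFP_KERNEL); /* one order-0 page */
	/*
	 * copy_to_user(uptr, obj, PAGE_SIZE + 1) would read past obj's
	 * page into whatever happens to follow it, and is rejected above
	 * as "<spans multiple pages>".
	 */
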
+
+/*
+ * Validates that the given object is:
+ * - not a bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+       const char *err;
+
+       /* Skip all tests if size is zero. */
+       if (!n)
+               return;
+
+       /* Check for invalid addresses. */
+       err = check_bogus_address(ptr, n);
+       if (err)
+               goto report;
+
+       /* Check for bad heap object. */
+       err = check_heap_object(ptr, n, to_user);
+       if (err)
+               goto report;
+
+       /* Check for bad stack object. */
+       switch (check_stack_object(ptr, n)) {
+       case NOT_STACK:
+               /* Object is not touching the current process stack. */
+               break;
+       case GOOD_FRAME:
+       case GOOD_STACK:
+               /*
+                * Object is either in the correct frame (when it
+                * is possible to check) or just generally on the
+                * process stack (when frame checking is not available).
+                */
+               return;
+       default:
+               err = "<process stack>";
+               goto report;
+       }
+
+       /* Check for object in kernel to avoid text exposure. */
+       err = check_kernel_text_object(ptr, n);
+       if (!err)
+               return;
+
+report:
+       report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
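
As the HAVE_ARCH_HARDENED_USERCOPY help text below describes, each architecture calls check_object_size() just before its low-level copy routines. A minimal sketch, assuming a hypothetical arch helper __arch_copy_from_user():

	static inline unsigned long copy_from_user(void *to,
			const void __user *from, unsigned long n)
	{
		/* Validate the kernel-side object before filling it. */
		check_object_size(to, n, false);
		return __arch_copy_from_user(to, from, n);
	}
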
index c83326c5ba580480b877079d2a465430ff408cf5..ef34a02719d73147f4a4af29f3d861b4dc34391e 100644 (file)
@@ -574,7 +574,7 @@ static void complete_generic_request(struct ceph_mon_generic_request *req)
        put_generic_request(req);
 }
 
-void cancel_generic_request(struct ceph_mon_generic_request *req)
+static void cancel_generic_request(struct ceph_mon_generic_request *req)
 {
        struct ceph_mon_client *monc = req->monc;
        struct ceph_mon_generic_request *lookup_req;
index b5ec09612ff71daeb1b95ed4b6f939a172cc7545..a97e7b506612b4255f4b99de76d74c46a1b3896d 100644 (file)
@@ -4220,7 +4220,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 
                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
                                               GFP_NOIO);
-               if (!pages) {
+               if (IS_ERR(pages)) {
                        ceph_msg_put(m);
                        return NULL;
                }
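
ceph_alloc_page_vector() reports failure with ERR_PTR(), never NULL, so the old check could never fire. The general idiom, sketched with a hypothetical allocator some_alloc():

	p = some_alloc();          /* returns ERR_PTR(-ENOMEM) on failure */
	if (IS_ERR(p))
		return PTR_ERR(p); /* do not test such pointers for NULL  */
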
index ca53c8319209469a25011b15d26af951a09d392d..22fb96efcf3467713a9e5430057ead684326e3db 100644 (file)
@@ -84,12 +84,6 @@ retry:
 }
 EXPORT_SYMBOL(ceph_find_or_create_string);
 
-static void ceph_free_string(struct rcu_head *head)
-{
-       struct ceph_string *cs = container_of(head, struct ceph_string, rcu);
-       kfree(cs);
-}
-
 void ceph_release_string(struct kref *ref)
 {
        struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
@@ -101,7 +95,7 @@ void ceph_release_string(struct kref *ref)
        }
        spin_unlock(&string_tree_lock);
 
-       call_rcu(&cs->rcu, ceph_free_string);
+       kfree_rcu(cs, rcu);
 }
 EXPORT_SYMBOL(ceph_release_string);
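
kfree_rcu() needs only the name of the embedded rcu_head member and frees the object after a grace period, making the single-purpose callback unnecessary. The pattern, sketched with the struct's other fields abbreviated:

	struct ceph_string {
		struct kref kref;
		struct rcu_head rcu;
		/* ... */
	};

	kfree_rcu(cs, rcu);     /* call_rcu() plus a kfree() callback */
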
 
index 15b196fc2f49b35f6663b6b359493f6a27564797..179219845dfcdfbeb586d12c5ec1296095d9fbf4 100644 (file)
@@ -108,16 +108,20 @@ as-option = $(call try-run,\
 as-instr = $(call try-run,\
        printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
+# Do not attempt to build with gcc plugins during cc-option tests.
+# (And this uses delayed resolution so the flags will be up to date.)
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
 
 cc-option = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
 cc-option-yn = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
 
 # cc-option-align
 # Prefix align with either -falign or -malign
@@ -127,7 +131,7 @@ cc-option-align = $(subst -functions=0,,\
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
 cc-disable-warning = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-name
 # Expands to either gcc or clang
index 5e22b60589c1ea7b5155daf92ff9d59cf33019b3..61f0e6db909bbf0a7016d60f4b58a4c4bd3c0dba 100644 (file)
@@ -19,25 +19,42 @@ ifdef CONFIG_GCC_PLUGINS
     endif
   endif
 
-  GCC_PLUGINS_CFLAGS := $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y))
+  GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
 
-  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN
+  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
 
+  ifneq ($(PLUGINCC),)
+    # SANCOV_PLUGIN can only be in CFLAGS_KCOV, to avoid duplication.
+    GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
+  endif
+
+  KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+  GCC_PLUGIN := $(gcc-plugin-y)
+  GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
+endif
+
+# If plugins aren't supported, abort the build before hard-to-read compiler
+# errors start getting spewed by the main build.
+PHONY += gcc-plugins-check
+gcc-plugins-check: FORCE
+ifdef CONFIG_GCC_PLUGINS
   ifeq ($(PLUGINCC),)
     ifneq ($(GCC_PLUGINS_CFLAGS),)
       ifeq ($(call cc-ifversion, -ge, 0405, y), y)
-        PLUGINCC := $(shell $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)")
-        $(warning warning: your gcc installation does not support plugins, perhaps the necessary headers are missing?)
+       $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true
+       @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" >&2 && exit 1
       else
-        $(warning warning: your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least)
+       @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1
       endif
     endif
-  else
-    # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication.
-    GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
   endif
+endif
+       @:
 
-  KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-  GCC_PLUGIN := $(gcc-plugin-y)
-
+# Actually do the build, if requested.
+PHONY += gcc-plugins
+gcc-plugins: scripts_basic gcc-plugins-check
+ifdef CONFIG_GCC_PLUGINS
+       $(Q)$(MAKE) $(build)=scripts/gcc-plugins
 endif
+       @:
index fb92075654711393cd19af2c40fb37dcf7605dd0..b65224bfb847302db905ed2ac33dc5d598b9b48b 100755 (executable)
@@ -1,5 +1,12 @@
 #!/bin/sh
 srctree=$(dirname "$0")
+
+SHOW_ERROR=
+if [ "$1" = "--show-error" ] ; then
+       SHOW_ERROR=1
+       shift || true
+fi
+
 gccplugins_dir=$($3 -print-file-name=plugin)
 plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF
 #include "gcc-common.h"
@@ -13,6 +20,9 @@ EOF
 
 if [ $? -ne 0 ]
 then
+       if [ -n "$SHOW_ERROR" ] ; then
+               echo "${plugincc}" >&2
+       fi
        exit 1
 fi
 
@@ -48,4 +58,8 @@ then
        echo "$2"
        exit 0
 fi
+
+if [ -n "$SHOW_ERROR" ] ; then
+       echo "${plugincc}" >&2
+fi
 exit 1
index 88c8ec47232b1c8595992109fbdd8a503cc6cc09..8b29dc17c73cad2730531464d3528c27973cb659 100644 (file)
@@ -12,16 +12,18 @@ else
   export HOST_EXTRACXXFLAGS
 endif
 
-export GCCPLUGINS_DIR HOSTLIBS
-
 ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN))
   GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN))
 endif
 
-$(HOSTLIBS)-y := $(GCC_PLUGIN)
+export HOSTLIBS
+
+$(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p)))
 always := $($(HOSTLIBS)-y)
 
-cyc_complexity_plugin-objs := cyc_complexity_plugin.o
-sancov_plugin-objs := sancov_plugin.o
+$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o))
+
+subdir-y := $(GCC_PLUGIN_SUBDIR)
+subdir-  += $(GCC_PLUGIN_SUBDIR)
 
 clean-files += *.so
index 122fcdaf42c86cec7a5fbce08cc3b406f692f6c5..49a00d54b835f156745c27bfb12b1f64bcf9140c 100755 (executable)
@@ -432,7 +432,7 @@ foreach my $file (@ARGV) {
            die "$P: file '${file}' not found\n";
        }
     }
-    if ($from_filename || vcs_file_exists($file)) {
+    if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) {
        $file =~ s/^\Q${cur_path}\E//;  #strip any absolute path
        $file =~ s/^\Q${lk_path}\E//;   #or the path to the lk tree
        push(@files, $file);
index 176758cdfa577f4c25e3d4afdea4f6292a0be0c3..df28f2b6f3e1b47ab9a290c02f74b2da5f703196 100644 (file)
@@ -118,6 +118,34 @@ config LSM_MMAP_MIN_ADDR
          this low address space will need the permission specific to the
          systems running LSM.
 
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+       bool
+       help
+         The heap allocator implements __check_heap_object() for
+         validating memory ranges against heap object sizes in
+         support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+       bool
+       help
+         The architecture supports CONFIG_HARDENED_USERCOPY by
+         calling check_object_size() just before performing the
+         userspace copies in the low level implementation of
+         copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+       bool "Harden memory copies between kernel and userspace"
+       depends on HAVE_ARCH_HARDENED_USERCOPY
+       select BUG
+       help
+         This option checks for obviously wrong memory regions when
+         copying memory to/from the kernel (via copy_to_user() and
+         copy_from_user() functions) by rejecting memory ranges that
+         are larger than the specified heap object, span multiple
+         separately allocated pages, are not on the process stack,
+         or are part of the kernel text. This kills entire classes
+         of heap overflow exploits and similar kernel memory exposures.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
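
A hedged illustration of the bug class this option kills (all names hypothetical): a fixed-size heap object copied out with a user-controlled length.

	char *buf = kmalloc(64, GFP_KERNEL);    /* 64-byte heap object */
	/*
	 * If userspace can drive len above 64, an unhardened kernel leaks
	 * adjacent heap memory here; with HARDENED_USERCOPY the copy is
	 * rejected by __check_heap_object() instead.
	 */
	if (copy_to_user(ubuf, buf, len))
		return -EFAULT;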