Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 20 Nov 2015 04:39:38 +0000 (20:39 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 20 Nov 2015 04:39:38 +0000 (20:39 -0800)
Pull drm fixes from Dave Airlie:
 "A varied bunch of fixes, the radeon pull is probably a bit larger than
  I'd like, but it contains 2 weeks of stuff, and the Fiji fixes are a
  bit large, but they are Fiji specific.

  Otherwise:

   - mgag200: One cursor regression oops fix.
   - vc4: A few small fixes and cleanups.
   - core: atomic fixes and atomic helper fixes.
   - i915: Revert for the backlight regression along with a bunch of
     fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (58 commits)
  drm/atomic-helper: Check encoder/crtc constraints
  Revert "drm/i915: skip modeset if compatible for everyone."
  drm/mgag200: fix kernel hang in cursor code.
  drm/amdgpu: reserve/unreserve objects out of map/unmap operations
  drm/amdgpu: move bo_reserve out of amdgpu_vm_clear_bo
  drm/amdgpu: add lock for interval tree in vm
  drm/amdgpu: keep the owner for VMIDs
  drm/amdgpu: move VM manager clean into the VM code again
  drm/amdgpu: cleanup VM coding style
  drm/amdgpu: remove unused VM manager field
  drm/amdgpu: cleanup scheduler command submission
  drm/amdgpu: fix typo in firmware name
  drm/i915: Consider SPLL as another shared pll, v2.
  drm/i915: Fix gpu frequency change tracing
  drm/vc4: Make sure that planes aren't scaled.
  drm/vc4: Fix some failure to track __iomem decorations on pointers.
  drm/vc4: checking for NULL instead of IS_ERR
  drm/vc4: fix itnull.cocci warnings
  drm/vc4: fix platform_no_drv_owner.cocci warnings
  drm/vc4: vc4_plane_duplicate_state() can be static
  ...

23 files changed:
Documentation/IPMI.txt
MAINTAINERS
arch/arm64/crypto/aes-ce-cipher.c
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/compat.h
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/suspend.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/mmu.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/hid/wacom_wac.c
drivers/mmc/card/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/Kconfig
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/pxamci.c
drivers/sh/pm_runtime.c
kernel/livepatch/core.c

diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 31d1d658827f082f66c88c3147e99be3321635cf..c0d8788e75d3f1f2a335a27a883f161ad51128ac 100644
@@ -587,7 +587,7 @@ used to control it:
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x ifnum_to_use=n
+      nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
 occur (if pretimeout is zero, then pretimeout will not be enabled).  Note
 that the pretimeout is the time before the final timeout.  So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 seconds (10 seconds before the timeout). The panic_wdt_timeout
+is the timeout value set on a kernel panic, so that actions such as kdump
+have time to occur during the panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@ for configuring the watchdog:
        ipmi_watchdog.preop=<preop type>
        ipmi_watchdog.start_now=x
        ipmi_watchdog.nowayout=x
+       ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
 
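A worked invocation with illustrative values, pairing a 60-second normal
timeout with a longer panic timeout so a kdump capture has time to finish:

  modprobe ipmi_watchdog timeout=60 panic_wdt_timeout=120 action=reset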
diff --git a/MAINTAINERS b/MAINTAINERS
index ea1751283b4961cd90848b9fb8994db0f6711307..b16bffabe70a84e3b6673c67059998cf6d1770d6 100644
@@ -9315,7 +9315,6 @@ F:        drivers/i2c/busses/i2c-designware-*
 F:     include/linux/platform_data/i2c-designware.h
 
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
-M:     Seungwon Jeon <tgih.jun@samsung.com>
 M:     Jaehoon Chung <jh80.chung@samsung.com>
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792a983dda8b2909609502ef80d54c1215f8..f7bd9bf0bbb398bbb2a355c1b24d1f9c035bacf4 100644
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
 static struct crypto_alg aes_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-ce",
-       .cra_priority           = 300,
+       .cra_priority           = 250,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0abb3e41da2c7ec23c4a7625196e3..9622eb48f894db3fdb06db5441339babc2a1fdeb 100644
@@ -64,27 +64,31 @@ do {                                                                        \
 
 #define smp_load_acquire(p)                                            \
 ({                                                                     \
-       typeof(*p) ___p1;                                               \
+       union { typeof(*p) __val; char __c[1]; } __u;                   \
        compiletime_assert_atomic_type(*p);                             \
        switch (sizeof(*p)) {                                           \
        case 1:                                                         \
                asm volatile ("ldarb %w0, %1"                           \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u8 *)__u.__c)                       \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 2:                                                         \
                asm volatile ("ldarh %w0, %1"                           \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u16 *)__u.__c)                      \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 4:                                                         \
                asm volatile ("ldar %w0, %1"                            \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u32 *)__u.__c)                      \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 8:                                                         \
                asm volatile ("ldar %0, %1"                             \
-                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+                       : "=r" (*(__u64 *)__u.__c)                      \
+                       : "Q" (*p) : "memory");                         \
                break;                                                  \
        }                                                               \
-       ___p1;                                                          \
+       __u.__val;                                                      \
 })
 
 #define read_barrier_depends()         do { } while(0)
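
For context, smp_load_acquire() is the read half of a release/acquire
pairing; the union-based rewrite above changes only how the loaded value
is stored, not the ordering semantics. A minimal usage sketch with
hypothetical names:

	static int data, flag;

	void producer(void)
	{
		data = 42;			/* A */
		smp_store_release(&flag, 1);	/* orders A before the flag */
	}

	void consumer(void)
	{
		if (smp_load_acquire(&flag))	/* pairs with the release */
			BUG_ON(data != 42);	/* guaranteed to observe A */
	}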
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54041f8fd925f8661a72771e733461..eb8432bb82b8dd698c9bdd5723cc870b36158b25 100644
@@ -23,7 +23,6 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
-#include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ         100
 #ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc656e8b06a0850860b164a8b67eaa..61e08f360e31da1f92a38881cd2cf924d2f03c82 100644
@@ -18,7 +18,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
 #include <asm/xen/hypervisor.h>
 
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-       if (unlikely(!dev))
-               return dma_ops;
-       else if (dev->archdata.dma_ops)
+       if (dev && dev->archdata.dma_ops)
                return dev->archdata.dma_ops;
-       else if (acpi_disabled)
-               return dma_ops;
 
        /*
-        * When ACPI is enabled, if arch_set_dma_ops is not called,
-        * we will disable device DMA capability by setting it
-        * to dummy_dma_ops.
+        * We expect no ISA devices, and all other DMA masters are expected to
+        * have someone call arch_setup_dma_ops at device creation time.
         */
        return &dummy_dma_ops;
 }
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96b04f2e5b847a0dacf01f4eb1dff1..24165784b8038b732ea568d1e74fd8c0a699b914 100644
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
 #define destroy_context(mm)            do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)       ({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
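
The only functional change above is the added parentheses around mm,
standard macro hygiene. A sketch of the mis-parse they prevent, using a
hypothetical expression argument:

	init_new_context(tsk, flag ? mm1 : mm2);
	/* old expansion: atomic64_set(&flag ? mm1 : mm2->context.id, 0)
	 *   which binds as (&flag) ? mm1 : (mm2->context.id)  -- wrong
	 * new expansion: atomic64_set(&(flag ? mm1 : mm2)->context.id, 0) */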
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69a936017e508a6cab907040c9c67a..7e074f93f383fea1891ce79d4048c22f647841a7 100644
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL            __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO         __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX        __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT  __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d0a0b4227c4a7267cd85142192576d7ab3..212ae6361d8be45d6d73d197284a1ca587599342 100644
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
                 */
                seq_printf(m, "processor\t: %d\n", i);
 
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          loops_per_jiffy / (500000UL/HZ),
+                          loops_per_jiffy / (5000UL/HZ) % 100);
+
                /*
                 * Dump out the common processor features in a single line.
                 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
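
For reference, the new seq_printf() scales loops_per_jiffy into BogoMIPS
(loops_per_jiffy * HZ / 500000). With HZ=100 and an illustrative
loops_per_jiffy of 2404000: 500000UL/HZ = 5000 and 5000UL/HZ = 50, so it
prints 2404000/5000 = 480 and (2404000/50) % 100 = 80, i.e.
"BogoMIPS	: 480.80".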
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50f4cdf952087e77d473314e75728efeec0..fc5508e0df57ff8a132894d091c94938d299ee02 100644
@@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void)
 {
        efi_memory_desc_t *md;
 
+       init_new_context(NULL, &efi_mm);
+
        for_each_efi_memory_desc(&memmap, md) {
                u64 paddr, npages, size;
                pgprot_t prot;
@@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void)
                else
                        prot = PAGE_KERNEL;
 
-               create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+               create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+                                  __pgprot(pgprot_val(prot) | PTE_NG));
        }
        return true;
 }
@@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-       if (mm == &init_mm)
-               cpu_set_reserved_ttbr0();
-       else
-               cpu_switch_mm(mm->pgd, mm);
-
-       local_flush_tlb_all();
-       if (icache_is_aivivt())
-               __local_flush_icache_all();
+       switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e17cf7f268fdfc0dd5c2d91e00b1152fb3f..1095aa483a1c28e5387b23895c14d7a1746268a3 100644
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -70,6 +71,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         */
        local_dbg_save(flags);
 
+       /*
+        * Function graph tracer state gets inconsistent when the kernel
+        * calls functions that never return (aka suspend finishers), hence
+        * disable graph tracing during their execution.
+        */
+       pause_graph_tracing();
+
        /*
         * mm context saved on the stack, it will be restored when
         * the cpu comes out of reset through the identity mapped
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
                        hw_breakpoint_restore(NULL);
        }
 
+       unpause_graph_tracing();
+
        /*
         * Restore pstate flags. OS lock and mdscr have been already
         * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b405e8403f05137e560a2b317f4941..7963aa4b5d2869b70dfd9a2f3d0ef501a033480b 100644
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-       int ret;
-
-       dma_ops = &swiotlb_dma_ops;
-
-       ret = atomic_pool_init();
-
-       return ret;
+       return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+       size_t iosize = size;
        void *addr;
 
        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;
+
+       size = PAGE_ALIGN(size);
+
        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                struct page **pages;
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-               pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+               pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
                                        flush_page);
                if (!pages)
                        return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
-                       iommu_dma_free(dev, pages, size, handle);
+                       iommu_dma_free(dev, pages, iosize, handle);
        } else {
                struct page *page;
                /*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                if (!addr)
                        return NULL;
 
-               *handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+               *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, struct dma_attrs *attrs)
 {
+       size_t iosize = size;
+
+       size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 3 things depending on how it was allocated:
         * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
-               iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
                __free_from_pool(cpu_addr, size);
        } else if (is_vmalloc_addr(cpu_addr)){
                struct vm_struct *area = find_vm_area(cpu_addr);
 
                if (WARN_ON(!area || !area->pages))
                        return;
-               iommu_dma_free(dev, area->pages, size, &handle);
+               iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
-               iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        struct iommu_ops *iommu, bool coherent)
 {
-       if (!acpi_disabled && !dev->archdata.dma_ops)
-               dev->archdata.dma_ops = dma_ops;
+       if (!dev->archdata.dma_ops)
+               dev->archdata.dma_ops = &swiotlb_dma_ops;
 
        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
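
With this change the swiotlb default is installed per device instead of
through a global dma_ops pointer. A minimal sketch of the bus-side call
that triggers it (the dma_base and size arguments are illustrative):

	/* Called by bus/firmware code at device-add time. With no IOMMU
	 * and no per-device ops, the device now gets &swiotlb_dma_ops;
	 * a device that never reaches this call falls back to
	 * dummy_dma_ops in __generic_dma_ops(). */
	arch_setup_dma_ops(dev, 0, SZ_4G, NULL, false);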
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c4802c1f9c519fe2bfc7317f8ab5467..abb66f84d4ac896c0978b3c2584470d5db15a8ba 100644
@@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
         * for now. This will get more fine grained later once all memory
         * is mapped
         */
-       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+       unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+       unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@ static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine grained */
-       if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+       if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
-                                                       SECTION_SIZE);
+                                                        SWAPPER_BLOCK_SIZE);
 
                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }
 
-       if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+       if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
-                                                       SECTION_SIZE);
+                                                         SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
@@ -475,7 +475,7 @@ void mark_rodata_ro(void)
 {
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
-                               PAGE_KERNEL_EXEC | PTE_RDONLY);
+                               PAGE_KERNEL_ROX);
 
 }
 #endif
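
Note that, given the pgtable.h hunk earlier in this series, the old
expression was subtly wrong: PAGE_KERNEL_EXEC includes PTE_WRITE, so
"PAGE_KERNEL_EXEC | PTE_RDONLY" left PTE_WRITE and PTE_RDONLY set
together. PAGE_KERNEL_ROX (_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY |
PTE_RDONLY) drops PTE_WRITE, giving kernel text a genuinely read-only,
executable mapping.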
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 654f6f36a071c1411e21373d3cb53e00efadd4f6..55fe9020459f2c4bdcec710a90220b2ea32e54db 100644
@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
        return rv;
 }
 
-static void start_check_enables(struct smi_info *smi_info)
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+       smi_info->last_timeout_jiffies = jiffies;
+       mod_timer(&smi_info->si_timer, new_val);
+       smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+                         unsigned int size)
+{
+       smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+       if (smi_info->thread)
+               wake_up_process(smi_info->thread);
+
+       smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info, bool start_timer)
 {
        unsigned char msg[2];
 
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-       smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+       if (start_timer)
+               start_new_msg(smi_info, msg, 2);
+       else
+               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info)
+static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 {
        unsigned char msg[3];
 
@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;
 
-       smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+       if (start_timer)
+               start_new_msg(smi_info, msg, 3);
+       else
+               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
        smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
        smi_info->curr_msg->data_size = 2;
 
-       smi_info->handlers->start_transaction(
-               smi_info->si_sm,
-               smi_info->curr_msg->data,
-               smi_info->curr_msg->data_size);
+       start_new_msg(smi_info, smi_info->curr_msg->data,
+                     smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_MESSAGES;
 }
 
@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
        smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
        smi_info->curr_msg->data_size = 2;
 
-       smi_info->handlers->start_transaction(
-               smi_info->si_sm,
-               smi_info->curr_msg->data,
-               smi_info->curr_msg->data_size);
+       start_new_msg(smi_info, smi_info->curr_msg->data,
+                     smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_EVENTS;
 }
 
-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
-{
-       smi_info->last_timeout_jiffies = jiffies;
-       mod_timer(&smi_info->si_timer, new_val);
-       smi_info->timer_running = true;
-}
-
 /*
  * When we have a situation where we run out of memory and cannot
  * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
 {
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = true;
-               start_check_enables(smi_info);
+               start_check_enables(smi_info, start_timer);
                return true;
        }
        return false;
@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = false;
-               start_check_enables(smi_info);
+               start_check_enables(smi_info, true);
                return true;
        }
        return false;
@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
        msg = ipmi_alloc_smi_msg();
        if (!msg) {
-               if (!disable_si_irq(smi_info))
+               if (!disable_si_irq(smi_info, true))
                        smi_info->si_state = SI_NORMAL;
        } else if (enable_si_irq(smi_info)) {
                ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-               start_clear_flags(smi_info);
+               start_clear_flags(smi_info, true);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                if (smi_info->intf)
                        ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
-                       smi_info->handlers->start_transaction(
-                               smi_info->si_sm, msg, 2);
+                       start_new_msg(smi_info, msg, 2);
                        smi_info->si_state = SI_GETTING_FLAGS;
                        goto restart;
                }
@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                 * disable and messages disabled.
                 */
                if (smi_info->supports_event_msg_buff || smi_info->irq) {
-                       start_check_enables(smi_info);
+                       start_check_enables(smi_info, true);
                } else {
                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                        if (!smi_info->curr_msg)
@@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                }
                goto restart;
        }
+
+       if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+               /* Ok, if it fails, the timer will just go off. */
+               if (del_timer(&smi_info->si_timer))
+                       smi_info->timer_running = false;
+       }
+
  out:
        return si_sm_result;
 }
@@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
          .data = (void *)(unsigned long) SI_BT },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
 
 static int of_ipmi_probe(struct platform_device *dev)
 {
@@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
        }
        return 0;
 }
-MODULE_DEVICE_TABLE(of, of_ipmi_match);
 #else
 #define of_ipmi_match NULL
 static int of_ipmi_probe(struct platform_device *dev)
@@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
-       start_clear_flags(new_smi);
+       start_clear_flags(new_smi, false);
 
        /*
         * IRQ is defined to be set when non-zero.  req_events will
@@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
        }
-       disable_si_irq(to_clean);
+       disable_si_irq(to_clean, false);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 0ac3bd1a5497c5bae41f1cc51ac8cdf667ed73b4..096f0cef4da14152653ea61daa010b367174759d 100644
@@ -153,6 +153,9 @@ static int timeout = 10;
 /* The pre-timeout is disabled by default. */
 static int pretimeout;
 
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
 /* Default action is to reset the board on a timeout. */
 static unsigned char action_val = WDOG_TIMEOUT_RESET;
 
@@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 module_param(pretimeout, timeout, 0644);
 MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
 
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
+
 module_param_cb(action, &param_ops_str, action_op, 0644);
 MODULE_PARM_DESC(action, "Timeout action. One of: "
                 "reset, none, power_cycle, power_off.");
@@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
                /* Make sure we do this only once. */
                panic_event_handled = 1;
 
-               timeout = 255;
+               timeout = panic_wdt_timeout;
                pretimeout = 0;
                panic_halt_ipmi_set_timeout();
        }
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8b29949507d1692f436b1db5045ca7ed4b8d54f3..01a4f05c16421b63a09737ad3eb4a4fb249259ae 100644
@@ -2481,7 +2481,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
                        if (features->touch_max)
                                features->device_type |= WACOM_DEVICETYPE_TOUCH;
-                       if (features->type >= INTUOSHT || features->type <= BAMBOO_PT)
+                       if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
                                features->device_type |= WACOM_DEVICETYPE_PAD;
 
                        features->x_max = 4096;
@@ -3213,7 +3213,8 @@ static const struct wacom_features wacom_features_0x32F =
          WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x336 =
        { "Wacom DTU1141", 23472, 13203, 1023, 0,
-         DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+         DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+         WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x57 =
        { "Wacom DTK2241", 95640, 54060, 2047, 63,
          DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
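
The one-character operator fix above matters: assuming the type enum
orders INTUOSHT below BAMBOO_PT, any value below INTUOSHT already
satisfies "type <= BAMBOO_PT", so the old "||" test was true for every
device and all of them were flagged WACOM_DEVICETYPE_PAD. The "&&" form
restricts the flag to the intended [INTUOSHT, BAMBOO_PT] range.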
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 23b6c8e8701ccd6e3ec57c85483276de4b02f51b..d8486168415ae1123f6a31d7126cfc9192fe884b 100644
@@ -65,8 +65,7 @@ MODULE_ALIAS("mmc:block");
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 
-#define mmc_req_rel_wr(req)    (((req->cmd_flags & REQ_FUA) || \
-                                 (req->cmd_flags & REQ_META)) && \
+#define mmc_req_rel_wr(req)    ((req->cmd_flags & REQ_FUA) && \
                                  (rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER 0x01
 #define PACKED_CMD_WR  0x02
@@ -1467,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 
        /*
         * Reliable writes are used to implement Forced Unit Access and
-        * REQ_META accesses, and are supported only on MMCs.
-        *
-        * XXX: this really needs a good explanation of why REQ_META
-        * is treated special.
+        * are supported only on MMCs.
         */
-       bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
-                         (req->cmd_flags & REQ_META)) &&
+       bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
                (rq_data_dir(req) == WRITE) &&
                (md->flags & MMC_BLK_REL_WR);
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c793fda27321da0086bb01d92e33d9aa44480ed0..3a9a79ec4343cd0f9f71158768e5707340def684 100644
@@ -1040,9 +1040,24 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
        return err;
 }
 
+/* Caller must hold re-tuning */
+static int mmc_switch_status(struct mmc_card *card)
+{
+       u32 status;
+       int err;
+
+       err = mmc_send_status(card, &status);
+       if (err)
+               return err;
+
+       return mmc_switch_status_error(card->host, status);
+}
+
 static int mmc_select_hs400(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
+       bool send_status = true;
+       unsigned int max_dtr;
        int err = 0;
        u8 val;
 
@@ -1053,25 +1068,36 @@ static int mmc_select_hs400(struct mmc_card *card)
              host->ios.bus_width == MMC_BUS_WIDTH_8))
                return 0;
 
-       /*
-        * Before switching to dual data rate operation for HS400,
-        * it is required to convert from HS200 mode to HS mode.
-        */
-       mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
-       mmc_set_bus_speed(card);
+       if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+               send_status = false;
 
+       /* Reduce frequency to HS frequency */
+       max_dtr = card->ext_csd.hs_max_dtr;
+       mmc_set_clock(host, max_dtr);
+
+       /* Switch card to HS mode */
        val = EXT_CSD_TIMING_HS |
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                           EXT_CSD_HS_TIMING, val,
                           card->ext_csd.generic_cmd6_time,
-                          true, true, true);
+                          true, send_status, true);
        if (err) {
                pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
                        mmc_hostname(host), err);
                return err;
        }
 
+       /* Set host controller to HS timing */
+       mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
+       if (!send_status) {
+               err = mmc_switch_status(card);
+               if (err)
+                       goto out_err;
+       }
+
+       /* Switch card to DDR */
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_BUS_WIDTH,
                         EXT_CSD_DDR_BUS_WIDTH_8,
@@ -1082,22 +1108,35 @@ static int mmc_select_hs400(struct mmc_card *card)
                return err;
        }
 
+       /* Switch card to HS400 */
        val = EXT_CSD_TIMING_HS400 |
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                           EXT_CSD_HS_TIMING, val,
                           card->ext_csd.generic_cmd6_time,
-                          true, true, true);
+                          true, send_status, true);
        if (err) {
                pr_err("%s: switch to hs400 failed, err:%d\n",
                         mmc_hostname(host), err);
                return err;
        }
 
+       /* Set host controller to HS400 timing and frequency */
        mmc_set_timing(host, MMC_TIMING_MMC_HS400);
        mmc_set_bus_speed(card);
 
+       if (!send_status) {
+               err = mmc_switch_status(card);
+               if (err)
+                       goto out_err;
+       }
+
        return 0;
+
+out_err:
+       pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+              __func__, err);
+       return err;
 }
 
 int mmc_hs200_to_hs400(struct mmc_card *card)
@@ -1105,19 +1144,6 @@ int mmc_hs200_to_hs400(struct mmc_card *card)
        return mmc_select_hs400(card);
 }
 
-/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
-{
-       u32 status;
-       int err;
-
-       err = mmc_send_status(card, &status);
-       if (err)
-               return err;
-
-       return mmc_switch_status_error(card->host, status);
-}
-
 int mmc_hs400_to_hs200(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
@@ -1219,6 +1245,8 @@ static void mmc_select_driver_type(struct mmc_card *card)
 static int mmc_select_hs200(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
+       bool send_status = true;
+       unsigned int old_timing;
        int err = -EINVAL;
        u8 val;
 
@@ -1234,6 +1262,9 @@ static int mmc_select_hs200(struct mmc_card *card)
 
        mmc_select_driver_type(card);
 
+       if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+               send_status = false;
+
        /*
         * Set the bus width(4 or 8) with host's support and
         * switch to HS200 mode if bus width is set successfully.
@@ -1245,11 +1276,25 @@ static int mmc_select_hs200(struct mmc_card *card)
                err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                   EXT_CSD_HS_TIMING, val,
                                   card->ext_csd.generic_cmd6_time,
-                                  true, true, true);
-               if (!err)
-                       mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+                                  true, send_status, true);
+               if (err)
+                       goto err;
+               old_timing = host->ios.timing;
+               mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+               if (!send_status) {
+                       err = mmc_switch_status(card);
+                       /*
+                        * mmc_select_timing() assumes timing has not changed if
+                        * it is a switch error.
+                        */
+                       if (err == -EBADMSG)
+                               mmc_set_timing(host, old_timing);
+               }
        }
 err:
+       if (err)
+               pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+                      __func__, err);
        return err;
 }
 
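Distilled from the hunks above, the rewritten mmc_select_hs400() now
sequences the mode change roughly as follows (a sketch, not the verbatim
code):

	mmc_set_clock(host, card->ext_csd.hs_max_dtr);    /* HS rate first */
	__mmc_switch(card, ..., EXT_CSD_TIMING_HS, ...);  /* card to HS */
	mmc_set_timing(host, MMC_TIMING_MMC_HS);          /* then the host */
	mmc_switch(card, ..., EXT_CSD_DDR_BUS_WIDTH_8, ...);
	__mmc_switch(card, ..., EXT_CSD_TIMING_HS400, ...);
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
	mmc_set_bus_speed(card);

When the host advertises MMC_CAP_WAIT_WHILE_BUSY, CMD13 polling inside
__mmc_switch() is skipped and a single mmc_switch_status() check follows
each host timing change instead.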
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index af71de5fda3b48c99fefd122a0cb6df9af2234ac..1dee533634c986d71176917f4d510a4c7214bf5a 100644
@@ -473,6 +473,7 @@ config MMC_DAVINCI
 
 config MMC_GOLDFISH
        tristate "goldfish qemu Multimedia Card Interface support"
+       depends on HAS_DMA
        depends on GOLDFISH || COMPILE_TEST
        help
          This selects the Goldfish Multimedia card Interface emulation
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 39568cc29a2a18cf752141dcf6233746b4d149e1..33dfd7e72516c3920b7ec641efb930668f0e15f5 100644
@@ -1276,7 +1276,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
        int start = 0, len = 0;
        int start_final = 0, len_final = 0;
        u8 final_phase = 0xff;
-       struct msdc_delay_phase delay_phase;
+       struct msdc_delay_phase delay_phase = { 0, };
 
        if (delay == 0) {
                dev_err(host->dev, "phase error: [map:%x]\n", delay);
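
get_best_delay() returns struct msdc_delay_phase by value (see the
signature above), so before this fix the early "delay == 0" error path
could hand back uninitialized stack contents; the "= { 0, }" initializer
zeroes every member up front.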
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8cadd74e8407bb08d7e277a82ac5d80d496f77e4..ce08896b9d696b00440fe7807aeddf72b922f320 100644
@@ -805,7 +805,7 @@ static int pxamci_probe(struct platform_device *pdev)
                goto out;
        } else {
                mmc->caps |= host->pdata->gpio_card_ro_invert ?
-                       MMC_CAP2_RO_ACTIVE_HIGH : 0;
+                       0 : MMC_CAP2_RO_ACTIVE_HIGH;
        }
 
        if (gpio_is_valid(gpio_cd))
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 25abd4eb7d102113d94c62bf7bbc1b8f935f0ca8..91a003011acfacb277e892b47c99771ceae80ffa 100644
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 
 static int __init sh_pm_runtime_init(void)
 {
-       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
                if (!of_find_compatible_node(NULL, NULL,
                                             "renesas,cpg-mstp-clocks"))
                        return 0;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6e5344112419ca43ace3474466db35d1692752ab..db545cbcdb8933e1ab4b95a9c276eb523de8ba43 100644
@@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod,
 
        for (reloc = obj->relocs; reloc->name; reloc++) {
                if (!klp_is_module(obj)) {
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+                       /* If KASLR has been enabled, adjust old value accordingly */
+                       if (kaslr_enabled())
+                               reloc->val += kaslr_offset();
+#endif
                        ret = klp_verify_vmlinux_symbol(reloc->name,
                                                        reloc->val);
                        if (ret)