microblaze: Support for WB cache
authorMichal Simek <monstr@monstr.eu>
Thu, 10 Dec 2009 10:43:57 +0000 (11:43 +0100)
committerMichal Simek <monstr@monstr.eu>
Mon, 14 Dec 2009 07:45:10 +0000 (08:45 +0100)
Microblaze version 7.20.d is the first MB version which can be run
on MMU Linux. Please do not use previous versions because they contain
HW bug.
Adding WB support made it necessary to redesign the whole cache design.
Microblaze versions from 7.20.a don't need to disable IRQs and caches
before working with them, which is why there are special structures for it.

Signed-off-by: Michal Simek <monstr@monstr.eu>
arch/microblaze/include/asm/cacheflush.h
arch/microblaze/kernel/cpu/cache.c
arch/microblaze/kernel/setup.c
arch/microblaze/kernel/signal.c

index 1f04b911145407cc853f8ea5de59d6394783139e..a6edd356cd08c583807fb278913f943172f7ae20 100644 (file)
@@ -18,6 +18,8 @@
 /* Somebody depends on this; sigh... */
 #include <linux/mm.h>
 
+/* Look at Documentation/cachetlb.txt */
+
 /*
  * Cache handling functions.
  * Microblaze has a write-through data cache, meaning that the data cache
  * instruction cache to make sure we don't fetch old, bad code.
  */
 
+/* struct cache, d=dcache, i=icache, fl = flush, iv = invalidate,
+ * suffix r = range */
+struct scache {
+       /* icache */
+       void (*ie)(void); /* enable */
+       void (*id)(void); /* disable */
+       void (*ifl)(void); /* flush */
+       void (*iflr)(unsigned long a, unsigned long b);
+       void (*iin)(void); /* invalidate */
+       void (*iinr)(unsigned long a, unsigned long b);
+       /* dcache */
+       void (*de)(void); /* enable */
+       void (*dd)(void); /* disable */
+       void (*dfl)(void); /* flush */
+       void (*dflr)(unsigned long a, unsigned long b);
+       void (*din)(void); /* invalidate */
+       void (*dinr)(unsigned long a, unsigned long b);
+};
+
+/* microblaze cache */
+extern struct scache *mbc;
+
+void microblaze_cache_init(void);
+
+#define enable_icache()                                        mbc->ie();
+#define disable_icache()                               mbc->id();
+#define flush_icache()                                 mbc->ifl();
+#define flush_icache_range(start, end)                 mbc->iflr(start, end);
+#define invalidate_icache()                            mbc->iin();
+#define invalidate_icache_range(start, end)            mbc->iinr(start, end);
+
+
+#define flush_icache_user_range(vma, pg, adr, len)     flush_icache();
+#define flush_icache_page(vma, pg)                     do { } while (0)
+
+#define enable_dcache()                                        mbc->de();
+#define disable_dcache()                               mbc->dd();
 /* FIXME for LL-temac driver */
-#define invalidate_dcache_range(start, end) \
-                       __invalidate_dcache_range(start, end)
-
-#define flush_cache_all()                      __invalidate_cache_all()
-#define flush_cache_mm(mm)                     do { } while (0)
-#define flush_cache_range(vma, start, end)     __invalidate_cache_all()
-#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
+#define invalidate_dcache()                            mbc->din();
+#define invalidate_dcache_range(start, end)            mbc->dinr(start, end);
+#define flush_dcache()                                 mbc->dfl();
+#define flush_dcache_range(start, end)                 mbc->dflr(start, end);
 
-#define flush_dcache_range(start, end) __invalidate_dcache_range(start, end)
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+/* D-cache aliasing problem can't happen - cache is between MMU and ram */
 #define flush_dcache_page(page)                        do { } while (0)
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)      do { } while (0)
 
-#define flush_icache_range(start, len) __invalidate_icache_range(start, len)
-#define flush_icache_page(vma, pg)             do { } while (0)
-
-#ifndef CONFIG_MMU
-# define flush_icache_user_range(start, len)   do { } while (0)
-#else
-# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
-
-# define flush_page_to_ram(page)               do { } while (0)
 
-# define flush_icache()                        __invalidate_icache_all()
-# define flush_cache_sigtramp(vaddr) \
-                       __invalidate_icache_range(vaddr, vaddr + 8)
-
-# define flush_dcache_mmap_lock(mapping)       do { } while (0)
-# define flush_dcache_mmap_unlock(mapping)     do { } while (0)
+#define flush_cache_dup_mm(mm)                         do { } while (0)
+#define flush_cache_vmap(start, end)                   do { } while (0)
+#define flush_cache_vunmap(start, end)                 do { } while (0)
+#define flush_cache_mm(mm)                     do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
 
-# define flush_cache_dup_mm(mm)                        do { } while (0)
+/* MS: kgdb code use this macro, wrong len with FLASH */
+#if 0
+#define flush_cache_range(vma, start, len)     {       \
+       flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
+       flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
+}
 #endif
 
-#define flush_cache_vmap(start, end)           do { } while (0)
-#define flush_cache_vunmap(start, end)         do { } while (0)
-
-
-void _enable_icache(void);
-void _disable_icache(void);
-void _invalidate_icache(unsigned int addr);
-
-#define __enable_icache()              _enable_icache()
-#define __disable_icache()             _disable_icache()
-#define __invalidate_icache(addr)      _invalidate_icache(addr)
-
-void _enable_dcache(void);
-void _disable_dcache(void);
-void _invalidate_dcache(unsigned int addr);
-
-#define __enable_dcache()              _enable_dcache()
-#define __disable_dcache()             _disable_dcache()
-#define __invalidate_dcache(addr)      _invalidate_dcache(addr)
-
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-/* see arch/microblaze/kernel/cache.c */
-extern void __invalidate_icache_all(void);
-extern void __invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __invalidate_icache_page(struct vm_area_struct *vma,
-                               struct page *page);
-extern void __invalidate_icache_user_range(struct vm_area_struct *vma,
-                               struct page *page,
-                               unsigned long adr, int len);
-extern void __invalidate_cache_sigtramp(unsigned long addr);
-
-extern void __invalidate_dcache_all(void);
-extern void __invalidate_dcache_range(unsigned long start, unsigned long end);
-extern void __invalidate_dcache_page(struct vm_area_struct *vma,
-                               struct page *page);
-extern void __invalidate_dcache_user_range(struct vm_area_struct *vma,
-                               struct page *page,
-                               unsigned long adr, int len);
-
-extern inline void __invalidate_cache_all(void)
-{
-       __invalidate_icache_all();
-       __invalidate_dcache_all();
-}
+#define flush_cache_range(vma, start, len) do { } while (0)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy((dst), (src), (len)); \
-       flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
+do {                                                                   \
+       memcpy((dst), (src), (len));                                    \
+       flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
 } while (0)
 
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy((dst), (src), (len))
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)           \
+do {                                                                   \
+       memcpy((dst), (src), (len));                                    \
+} while (0)
 
 #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
index 538f1df6761d2d76a0569e104177181f6ed49231..d9d63831cc2f9d6318257fa358ac41d41553c789 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file COPYING in the main directory of this
 #include <asm/cacheflush.h>
 #include <linux/cache.h>
 #include <asm/cpuinfo.h>
+#include <asm/pvr.h>
 
-/* Exported functions */
+static inline void __invalidate_flush_icache(unsigned int addr)
+{
+       __asm__ __volatile__ ("wic      %0, r0;"        \
+                                       : : "r" (addr));
+}
+
+static inline void __flush_dcache(unsigned int addr)
+{
+       __asm__ __volatile__ ("wdc.flush        %0, r0;"        \
+                                       : : "r" (addr));
+}
+
+static inline void __invalidate_dcache(unsigned int baseaddr,
+                                               unsigned int offset)
+{
+       __asm__ __volatile__ ("wdc.clear        %0, %1;"        \
+                                       : : "r" (baseaddr), "r" (offset));
+}
 
-void _enable_icache(void)
+static inline void __enable_icache_msr(void)
 {
-       if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-               __asm__ __volatile__ ("                                 \
-                               msrset  r0, %0;                         \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_ICE)                         \
+       __asm__ __volatile__ (" msrset  r0, %0;         \
+                               nop; "                  \
+                       : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+       __asm__ __volatile__ (" msrclr  r0, %0;         \
+                               nop; "                  \
+                       : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+       __asm__ __volatile__ (" msrset  r0, %0;         \
+                               nop; "                  \
+                               :                       \
+                               : "i" (MSR_DCE)         \
                                : "memory");
-#else
-               __asm__ __volatile__ ("                                 \
-                               mfs     r12, rmsr;                      \
-                               nop;                                    \
-                               ori     r12, r12, %0;                   \
-                               mts     rmsr, r12;                      \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_ICE)                         \
-                               : "memory", "r12");
-#endif
-       }
 }
 
-void _disable_icache(void)
+static inline void __disable_dcache_msr(void)
 {
-       if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-               __asm__ __volatile__ ("                                 \
-                               msrclr r0, %0;                          \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_ICE)                         \
+       __asm__ __volatile__ (" msrclr  r0, %0;         \
+                               nop; "                  \
+                               :                       \
+                               : "i" (MSR_DCE)         \
                                : "memory");
-#else
-               __asm__ __volatile__ ("                                 \
-                               mfs     r12, rmsr;                      \
-                               nop;                                    \
-                               andi    r12, r12, ~%0;                  \
-                               mts     rmsr, r12;                      \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_ICE)                         \
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+       __asm__ __volatile__ (" mfs     r12, rmsr;      \
+                               nop;                    \
+                               ori     r12, r12, %0;   \
+                               mts     rmsr, r12;      \
+                               nop; "                  \
+                               :                       \
+                               : "i" (MSR_ICE)         \
                                : "memory", "r12");
-#endif
-       }
 }
 
-void _invalidate_icache(unsigned int addr)
+static inline void __disable_icache_nomsr(void)
 {
-       if (cpuinfo.use_icache) {
-               __asm__ __volatile__ ("                                 \
-                               wic     %0, r0"                         \
-                               :                                       \
-                               : "r" (addr));
-       }
+       __asm__ __volatile__ (" mfs     r12, rmsr;      \
+                               nop;                    \
+                               andi    r12, r12, ~%0;  \
+                               mts     rmsr, r12;      \
+                               nop; "                  \
+                               :                       \
+                               : "i" (MSR_ICE)         \
+                               : "memory", "r12");
 }
 
-void _enable_dcache(void)
+static inline void __enable_dcache_nomsr(void)
 {
-       if (cpuinfo.use_dcache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-               __asm__ __volatile__ ("                                 \
-                               msrset  r0, %0;                         \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_DCE)                         \
-                               : "memory");
-#else
-               __asm__ __volatile__ ("                                 \
-                               mfs     r12, rmsr;                      \
-                               nop;                                    \
-                               ori     r12, r12, %0;                   \
-                               mts     rmsr, r12;                      \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_DCE)                 \
+       __asm__ __volatile__ (" mfs     r12, rmsr;      \
+                               nop;                    \
+                               ori     r12, r12, %0;   \
+                               mts     rmsr, r12;      \
+                               nop; "                  \
+                               :                       \
+                               : "i" (MSR_DCE)         \
                                : "memory", "r12");
-#endif
-       }
 }
 
-void _disable_dcache(void)
+static inline void __disable_dcache_nomsr(void)
 {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-               __asm__ __volatile__ ("                                 \
-                               msrclr  r0, %0;                         \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_DCE)                 \
-                               : "memory");
-#else
-               __asm__ __volatile__ ("                                 \
-                               mfs     r12, rmsr;                      \
-                               nop;                                    \
-                               andi    r12, r12, ~%0;                  \
-                               mts     rmsr, r12;                      \
-                               nop; "                                  \
-                               :                                       \
-                               : "i" (MSR_DCE)                 \
+       __asm__ __volatile__ (" mfs     r12, rmsr;      \
+                               nop;                    \
+                               andi    r12, r12, ~%0;  \
+                               mts     rmsr, r12;      \
+                               nop; "                  \
+                               :                       \
+                               : "i" (MSR_DCE)         \
                                : "memory", "r12");
-#endif
 }
 
-void _invalidate_dcache(unsigned int addr)
+
+/* Helper macro for computing the limits of cache range loops */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)   \
+do {                                                                   \
+       int align = ~(cache_line_length - 1);                           \
+       end = min(start + cache_size, end);                             \
+       start &= align;                                                 \
+       end = ((end & align) + cache_line_length);                      \
+} while (0);
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on that cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op)                    \
+do {                                                                   \
+       unsigned int len = cache_size;                                  \
+       int step = -line_length;                                        \
+       BUG_ON(step >= 0);                                              \
+                                                                       \
+       __asm__ __volatile__ (" 1:      " #op " %0, r0;                 \
+                                       bgtid   %0, 1b;                 \
+                                       addk    %0, %0, %1;             \
+                                       " : : "r" (len), "r" (step)     \
+                                       : "memory");                    \
+} while (0);
+
+
+#define CACHE_ALL_LOOP2(cache_size, line_length, op)                   \
+do {                                                                   \
+       unsigned int len = cache_size;                                  \
+       int step = -line_length;                                        \
+       BUG_ON(step >= 0);                                              \
+                                                                       \
+       __asm__ __volatile__ (" 1:      " #op " r0, %0;                 \
+                                       bgtid   %0, 1b;                 \
+                                       addk    %0, %0, %1;             \
+                                       " : : "r" (len), "r" (step)     \
+                                       : "memory");                    \
+} while (0);
+
+/* for wdc.flush/clear */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op)                        \
+do {                                                                   \
+       int step = -line_length;                                        \
+       int count = end - start;                                        \
+       BUG_ON(count <= 0);                                             \
+                                                                       \
+       __asm__ __volatile__ (" 1:      " #op " %0, %1;                 \
+                                       bgtid   %1, 1b;                 \
+                                       addk    %1, %1, %2;             \
+                                       " : : "r" (start), "r" (count), \
+                                       "r" (step) : "memory");         \
+} while (0);
+
+/* It is used only first parameter for OP - for wic, wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op)                        \
+do {                                                                   \
+       int step = -line_length;                                        \
+       int count = end - start;                                        \
+       BUG_ON(count <= 0);                                             \
+                                                                       \
+       __asm__ __volatile__ (" 1:      addk    %0, %0, %1;             \
+                                       " #op " %0, r0;                 \
+                                       bgtid   %1, 1b;                 \
+                                       addk    %1, %1, %2;             \
+                                       " : : "r" (start), "r" (count), \
+                                       "r" (step) : "memory");         \
+} while (0);
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 {
-               __asm__ __volatile__ ("                                 \
-                               wdc     %0, r0"                         \
-                               :                                       \
-                               : "r" (addr));
+       unsigned long flags;
+
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
+
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+       local_irq_save(flags);
+       __disable_icache_msr();
+
+       CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+       __enable_icache_msr();
+       local_irq_restore(flags);
 }
 
-void __invalidate_icache_all(void)
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+                               unsigned long end)
 {
-       unsigned int i;
-       unsigned flags;
+       unsigned long flags;
 
-       if (cpuinfo.use_icache) {
-               local_irq_save(flags);
-               __disable_icache();
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
 
-               /* Just loop through cache size and invalidate, no need to add
-                       CACHE_BASE address */
-               for (i = 0; i < cpuinfo.icache_size;
-                       i += cpuinfo.icache_line_length)
-                               __invalidate_icache(i);
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.icache_line_length, cpuinfo.icache_size);
 
-               __enable_icache();
-               local_irq_restore(flags);
-       }
+       local_irq_save(flags);
+       __disable_icache_nomsr();
+
+       CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+       __enable_icache_nomsr();
+       local_irq_restore(flags);
 }
 
-void __invalidate_icache_range(unsigned long start, unsigned long end)
+static void __flush_icache_range_noirq(unsigned long start,
+                               unsigned long end)
 {
-       unsigned int i;
-       unsigned flags;
-       unsigned int align;
-
-       if (cpuinfo.use_icache) {
-               /*
-                * No need to cover entire cache range,
-                * just cover cache footprint
-                */
-               end = min(start + cpuinfo.icache_size, end);
-               align = ~(cpuinfo.icache_line_length - 1);
-               start &= align; /* Make sure we are aligned */
-               /* Push end up to the next cache line */
-               end = ((end & align) + cpuinfo.icache_line_length);
-
-               local_irq_save(flags);
-               __disable_icache();
-
-               for (i = start; i < end; i += cpuinfo.icache_line_length)
-                       __invalidate_icache(i);
-
-               __enable_icache();
-               local_irq_restore(flags);
-       }
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
+
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.icache_line_length, cpuinfo.icache_size);
+       CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+       unsigned long flags;
+
+       pr_debug("%s\n", __func__);
+
+       local_irq_save(flags);
+       __disable_icache_msr();
+
+       CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+       __enable_icache_msr();
+       local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+       unsigned long flags;
+
+       pr_debug("%s\n", __func__);
+
+       local_irq_save(flags);
+       __disable_icache_nomsr();
+
+       CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+       __enable_icache_nomsr();
+       local_irq_restore(flags);
 }
 
-void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
+static void __flush_icache_all_noirq(void)
 {
-       __invalidate_icache_all();
+       pr_debug("%s\n", __func__);
+       CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
 }
 
-void __invalidate_icache_user_range(struct vm_area_struct *vma,
-                               struct page *page, unsigned long adr,
-                               int len)
+static void __invalidate_dcache_all_msr_irq(void)
 {
-       __invalidate_icache_all();
+       unsigned long flags;
+
+       pr_debug("%s\n", __func__);
+
+       local_irq_save(flags);
+       __disable_dcache_msr();
+
+       CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+       __enable_dcache_msr();
+       local_irq_restore(flags);
 }
 
-void __invalidate_cache_sigtramp(unsigned long addr)
+static void __invalidate_dcache_all_nomsr_irq(void)
 {
-       __invalidate_icache_range(addr, addr + 8);
+       unsigned long flags;
+
+       pr_debug("%s\n", __func__);
+
+       local_irq_save(flags);
+       __disable_dcache_nomsr();
+
+       CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+       __enable_dcache_nomsr();
+       local_irq_restore(flags);
 }
 
-void __invalidate_dcache_all(void)
+static void __invalidate_dcache_all_noirq_wt(void)
 {
-       unsigned int i;
-       unsigned flags;
-
-       if (cpuinfo.use_dcache) {
-               local_irq_save(flags);
-               __disable_dcache();
-
-               /*
-                * Just loop through cache size and invalidate,
-                * no need to add CACHE_BASE address
-                */
-               for (i = 0; i < cpuinfo.dcache_size;
-                       i += cpuinfo.dcache_line_length)
-                               __invalidate_dcache(i);
-
-               __enable_dcache();
-               local_irq_restore(flags);
-       }
+       pr_debug("%s\n", __func__);
+       CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
 }
 
-void __invalidate_dcache_range(unsigned long start, unsigned long end)
+/* FIXME this is weird - should be only wdc but not work
+ * MS: I am getting bus errors and other weird things */
+static void __invalidate_dcache_all_wb(void)
 {
+       pr_debug("%s\n", __func__);
+       CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+                                       wdc.clear)
+
+#if 0
        unsigned int i;
-       unsigned flags;
-       unsigned int align;
-
-       if (cpuinfo.use_dcache) {
-               /*
-                * No need to cover entire cache range,
-                * just cover cache footprint
-                */
-               end = min(start + cpuinfo.dcache_size, end);
-               align = ~(cpuinfo.dcache_line_length - 1);
-               start &= align; /* Make sure we are aligned */
-               /* Push end up to the next cache line */
-               end = ((end & align) + cpuinfo.dcache_line_length);
-               local_irq_save(flags);
-               __disable_dcache();
-
-               for (i = start; i < end; i += cpuinfo.dcache_line_length)
-                       __invalidate_dcache(i);
-
-               __enable_dcache();
-               local_irq_restore(flags);
-       }
+
+       pr_debug("%s\n", __func__);
+
+       /* Just loop through cache size and invalidate it */
+       for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
+                       __invalidate_dcache(0, i);
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+                                               unsigned long end)
+{
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
+
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+       CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+                                                       unsigned long end)
+{
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+       CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
 }
 
-void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+                                                       unsigned long end)
 {
-       __invalidate_dcache_all();
+       unsigned long flags;
+
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+       local_irq_save(flags);
+       __disable_dcache_msr();
+
+       CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+       __enable_dcache_msr();
+       local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
+                                                       unsigned long end)
+{
+       unsigned long flags;
+
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
+
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+       local_irq_save(flags);
+       __disable_dcache_nomsr();
+
+       CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+       __enable_dcache_nomsr();
+       local_irq_restore(flags);
+}
+
+static void __flush_dcache_all_wb(void)
+{
+       pr_debug("%s\n", __func__);
+       CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+                               wdc.flush);
 }
 
-void __invalidate_dcache_user_range(struct vm_area_struct *vma,
-                               struct page *page, unsigned long adr,
-                               int len)
+/* Flush (write back) a dcache address range - write-back model only */
+static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
 {
-       __invalidate_dcache_all();
+       pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+                               (unsigned int)start, (unsigned int) end);
+
+       /* bound the range by the cache geometry (line length / total size) */
+       CACHE_LOOP_LIMITS(start, end,
+                       cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+       CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+}
+
+/* Cache-op table shared by wb and wt models; set by microblaze_cache_init() */
+struct scache *mbc;
+
+/* New write-back model, with the MSR instruction for cache enable/disable */
+const struct scache wb_msr = {
+       .ie = __enable_icache_msr,
+       .id = __disable_icache_msr,
+       .ifl = __flush_icache_all_noirq,
+       .iflr = __flush_icache_range_noirq,
+       .iin = __flush_icache_all_noirq,
+       .iinr = __flush_icache_range_noirq,
+       .de = __enable_dcache_msr,
+       .dd = __disable_dcache_msr,
+       .dfl = __flush_dcache_all_wb,
+       .dflr = __flush_dcache_range_wb,
+       .din = __invalidate_dcache_all_wb,
+       .dinr = __invalidate_dcache_range_wb,
+};
+
+/* wb_nomsr differs from wb_msr only in the ie, id, de and dd helpers */
+const struct scache wb_nomsr = {
+       .ie = __enable_icache_nomsr,
+       .id = __disable_icache_nomsr,
+       .ifl = __flush_icache_all_noirq,
+       .iflr = __flush_icache_range_noirq,
+       .iin = __flush_icache_all_noirq,
+       .iinr = __flush_icache_range_noirq,
+       .de = __enable_dcache_nomsr,
+       .dd = __disable_dcache_nomsr,
+       .dfl = __flush_dcache_all_wb,
+       .dflr = __flush_dcache_range_wb,
+       .din = __invalidate_dcache_all_wb,
+       .dinr = __invalidate_dcache_range_wb,
+};
+
+/* Old wt cache model: IRQs disabled and cache turned off around each op */
+const struct scache wt_msr = {
+       .ie = __enable_icache_msr,
+       .id = __disable_icache_msr,
+       .ifl = __flush_icache_all_msr_irq,
+       .iflr = __flush_icache_range_msr_irq,
+       .iin = __flush_icache_all_msr_irq,
+       .iinr = __flush_icache_range_msr_irq,
+       .de = __enable_dcache_msr,
+       .dd = __disable_dcache_msr,
+       .dfl = __invalidate_dcache_all_msr_irq,
+       .dflr = __invalidate_dcache_range_msr_irq_wt,
+       .din = __invalidate_dcache_all_msr_irq,
+       .dinr = __invalidate_dcache_range_msr_irq_wt,
+};
+
+/* As wt_msr above, but using the nomsr enable/disable and flush helpers */
+const struct scache wt_nomsr = {
+       .ie = __enable_icache_nomsr,
+       .id = __disable_icache_nomsr,
+       .ifl = __flush_icache_all_nomsr_irq,
+       .iflr = __flush_icache_range_nomsr_irq,
+       .iin = __flush_icache_all_nomsr_irq,
+       .iinr = __flush_icache_range_nomsr_irq,
+       .de = __enable_dcache_nomsr,
+       .dd = __disable_dcache_nomsr,
+       .dfl = __invalidate_dcache_all_nomsr_irq,
+       .dflr = __invalidate_dcache_range_nomsr_irq,
+       .din = __invalidate_dcache_all_nomsr_irq,
+       .dinr = __invalidate_dcache_range_nomsr_irq,
+};
+
+/* New wt model (cores >= 7.20.a): cache ops need no IRQ/cache disabling */
+const struct scache wt_msr_noirq = {
+       .ie = __enable_icache_msr,
+       .id = __disable_icache_msr,
+       .ifl = __flush_icache_all_noirq,
+       .iflr = __flush_icache_range_noirq,
+       .iin = __flush_icache_all_noirq,
+       .iinr = __flush_icache_range_noirq,
+       .de = __enable_dcache_msr,
+       .dd = __disable_dcache_msr,
+       .dfl = __invalidate_dcache_all_noirq_wt,
+       .dflr = __invalidate_dcache_range_nomsr_wt,
+       .din = __invalidate_dcache_all_noirq_wt,
+       .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* As wt_msr_noirq above, but with the nomsr enable/disable helpers */
+const struct scache wt_nomsr_noirq = {
+       .ie = __enable_icache_nomsr,
+       .id = __disable_icache_nomsr,
+       .ifl = __flush_icache_all_noirq,
+       .iflr = __flush_icache_range_noirq,
+       .iin = __flush_icache_all_noirq,
+       .iinr = __flush_icache_range_noirq,
+       .de = __enable_dcache_nomsr,
+       .dd = __disable_dcache_nomsr,
+       .dfl = __invalidate_dcache_all_noirq_wt,
+       .dflr = __invalidate_dcache_range_nomsr_wt,
+       .din = __invalidate_dcache_all_noirq_wt,
+       .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* CPU version codes - see arch/microblaze/kernel/cpu/cpuinfo.c */
+#define CPUVER_7_20_A  0x0c
+#define CPUVER_7_20_D  0x0f
+
+/* no trailing ';' so that "INFO(x);" does not expand to a double semicolon */
+#define INFO(s)        printk(KERN_INFO "cache: " s "\n")
+
+/* Select the cache-op table matching this CPU: MSR vs. nomsr
+ * enable/disable, write-back vs. write-through dcache, and (for wt)
+ * whether the core is new enough (>= 7.20.a) to operate on the caches
+ * without first disabling IRQs and the cache itself.
+ */
+void microblaze_cache_init(void)
+{
+       if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
+               if (cpuinfo.dcache_wb) {
+                       INFO("wb_msr");
+                       /* NOTE(review): cast drops const; mbc could be
+                        * declared 'const struct scache *' instead */
+                       mbc = (struct scache *)&wb_msr;
+                       if (cpuinfo.ver_code < CPUVER_7_20_D) {
+                               /* MS: problem with signal handling - hw bug */
+                               INFO("WB won't work properly");
+                       }
+               } else {
+                       if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+                               INFO("wt_msr_noirq");
+                               mbc = (struct scache *)&wt_msr_noirq;
+                       } else {
+                               INFO("wt_msr");
+                               mbc = (struct scache *)&wt_msr;
+                       }
+               }
+       } else {
+               if (cpuinfo.dcache_wb) {
+                       INFO("wb_nomsr");
+                       mbc = (struct scache *)&wb_nomsr;
+                       if (cpuinfo.ver_code < CPUVER_7_20_D) {
+                               /* MS: problem with signal handling - hw bug */
+                               INFO("WB won't work properly");
+                       }
+               } else {
+                       if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+                               INFO("wt_nomsr_noirq");
+                               mbc = (struct scache *)&wt_nomsr_noirq;
+                       } else {
+                               INFO("wt_nomsr");
+                               mbc = (struct scache *)&wt_nomsr;
+                       }
+               }
+       }
 }
index 1c3f18ba8af12e3a21d7f23a99e77c8f2c529c66..5372b24ad049cf92cfd881927c21f5e559a54089 100644 (file)
@@ -52,11 +52,12 @@ void __init setup_arch(char **cmdline_p)
        /* irq_early_init(); */
        setup_cpuinfo();
 
-       __invalidate_icache_all();
-       __enable_icache();
+       microblaze_cache_init();
 
-       __invalidate_dcache_all();
-       __enable_dcache();
+       enable_dcache();
+
+       invalidate_icache();
+       enable_icache();
 
        setup_memory();
 
index 0c96ac34c316f08fa8a94beb1f84cc9494822644..6de3db04b1a49f2b1ced440f4221749ed26f2553 100644 (file)
@@ -176,6 +176,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        struct rt_sigframe __user *frame;
        int err = 0;
        int signal;
+       unsigned long address = 0;
+#ifdef CONFIG_MMU
+       pmd_t *pmdp;
+       pte_t *ptep;
+#endif
 
        frame = get_sigframe(ka, regs, sizeof(*frame));
 
@@ -216,8 +221,29 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         Negative 8 offset because return is rtsd r15, 8 */
        regs->r15 = ((unsigned long)frame->tramp)-8;
 
-       __invalidate_cache_sigtramp((unsigned long)frame->tramp);
-
+       address = ((unsigned long)frame->tramp);
+#ifdef CONFIG_MMU
+       pmdp = pmd_offset(pud_offset(
+                       pgd_offset(current->mm, address),
+                                       address), address);
+
+       preempt_disable();
+       ptep = pte_offset_map(pmdp, address);
+       if (pte_present(*ptep)) {
+               address = (unsigned long) page_address(pte_page(*ptep));
+               /* MS: need to add the offset within the page */
+               address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
+               /* MS address is virtual */
+               address = virt_to_phys(address);
+               invalidate_icache_range(address, address + 8);
+               flush_dcache_range(address, address + 8);
+       }
+       pte_unmap(ptep);
+       preempt_enable();
+#else
+       flush_icache_range(address, address + 8);
+       flush_dcache_range(address, address + 8);
+#endif
        if (err)
                goto give_sigsegv;