MIPS: cache: Provide cache flush operations for XFS
authorRalf Baechle <ralf@linux-mips.org>
Fri, 17 Jun 2011 15:20:28 +0000 (16:20 +0100)
committerRalf Baechle <ralf@linux-mips.org>
Thu, 20 Oct 2011 14:00:18 +0000 (15:00 +0100)
Until now flush_kernel_vmap_range() and invalidate_kernel_vmap_range() did
not exist on MIPS resulting in heavy cache corruption on XFS filesystems.

Left for after the 3.0 release: optimization, and making this work with
highmem, too.  Since the combination of highmem and cache aliases does not
currently work anyway, this is not a regression.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Patchwork: https://patchwork.linux-mips.org/patch/2505/

arch/mips/include/asm/cacheflush.h
arch/mips/mm/c-octeon.c
arch/mips/mm/c-r3k.c
arch/mips/mm/c-r4k.c
arch/mips/mm/c-tx39.c
arch/mips/mm/cache.c

index 40bb9fde205ff9756812134cc826120f5d8bcb30..69468ded282820efd9547f7aa3837562e56db016 100644 (file)
@@ -114,4 +114,28 @@ unsigned long run_uncached(void *func);
 extern void *kmap_coherent(struct page *page, unsigned long addr);
 extern void kunmap_coherent(void);
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+/*
+ * Nothing to write back here for now; just trap the one combination
+ * this patch explicitly does not support (see commit message):
+ * D-cache aliases together with a highmem page.
+ */
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+       BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+}
+
+/*
+ * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
+ * cache writeback and invalidate operation.
+ */
+/* Per-CPU-type implementation, installed by the *_cache_init() routines. */
+extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
+
+/*
+ * Write back and invalidate a kernel vmap range so data written through
+ * an aliasing vmap address becomes visible (needed e.g. by XFS).  Only
+ * required when the D-cache is virtually indexed (cpu_has_dc_aliases).
+ */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+       if (cpu_has_dc_aliases)
+               __flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
+/*
+ * Deliberately the same operation as flush_kernel_vmap_range for now:
+ * a combined writeback+invalidate stands in for a pure invalidate.
+ */
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+       if (cpu_has_dc_aliases)
+               __flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
 #endif /* _ASM_CACHEFLUSH_H */
index 16c4d256b76f3f7179e5a77b856bd3a7a3825a52..daa81f7284ac89a5411b88d86f77ff6df1585f55 100644 (file)
@@ -169,6 +169,10 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
                octeon_flush_icache_all_cores(vma);
 }
 
+/*
+ * The cacheflush.h wrappers only call this hook when cpu_has_dc_aliases,
+ * which is not expected on Octeon, so reaching this is a bug.
+ * NOTE(review): assumes Octeon never reports D-cache aliases -- confirm.
+ */
+static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+       BUG();
+}
 
 /**
  * Probe Octeon's caches
@@ -273,6 +277,8 @@ void __cpuinit octeon_cache_init(void)
        flush_icache_range              = octeon_flush_icache_range;
        local_flush_icache_range        = local_octeon_flush_icache_range;
 
+       __flush_kernel_vmap_range       = octeon_flush_kernel_vmap_range;
+
        build_clear_page();
        build_copy_page();
 }
index e6b0efd3f6a4ed0746141c0ecdad4a9b22c71d76..0765583d0c924f6252ccb313052c46be2d81315e 100644 (file)
@@ -299,6 +299,11 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
        write_c0_status(flags);
 }
 
+/*
+ * Only called via cacheflush.h when cpu_has_dc_aliases; not expected on
+ * R3000-class cores, so trap it.
+ * NOTE(review): assumes R3k has no D-cache aliases -- confirm.
+ */
+static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+       BUG();
+}
+
 static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
 {
        /* Catch bad driver code */
@@ -323,6 +328,8 @@ void __cpuinit r3k_cache_init(void)
        flush_icache_range = r3k_flush_icache_range;
        local_flush_icache_range = r3k_flush_icache_range;
 
+       __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
+
        flush_cache_sigtramp = r3k_flush_cache_sigtramp;
        local_flush_data_cache_page = local_r3k_flush_data_cache_page;
        flush_data_cache_page = r3k_flush_data_cache_page;
index b9aabb998a32ab5856fb53e910785063eea78613..a79fe9aa7721aa56f05d8ab7995b6503cd98f618 100644 (file)
@@ -722,6 +722,39 @@ static void r4k_flush_icache_all(void)
                r4k_blast_icache();
 }
 
+/* Argument bundle: r4k_on_each_cpu() passes only a single void * along. */
+struct flush_kernel_vmap_range_args {
+       unsigned long   vaddr;
+       int             size;
+};
+
+
+/*
+ * Per-CPU worker: write back and invalidate the primary D-cache over
+ * [vaddr, vaddr + size).  If index ops are safe and the range covers the
+ * whole D-cache, blasting the entire cache is cheaper than a hit walk.
+ */
+static inline void local_r4k_flush_kernel_vmap_range(void *args)
+{
+       struct flush_kernel_vmap_range_args *vmra = args;
+       unsigned long vaddr = vmra->vaddr;
+       int size = vmra->size;
+
+       /*
+        * Aliases only affect the primary caches so don't bother with
+        * S-caches or T-caches.
+        */
+       if (cpu_has_safe_index_cacheops && size >= dcache_size)
+               r4k_blast_dcache();
+       else {
+               R4600_HIT_CACHEOP_WAR_IMPL;
+               blast_dcache_range(vaddr, vaddr + size);
+       }
+}
+
+/*
+ * Hook installed as __flush_kernel_vmap_range: package the range and run
+ * the writeback/invalidate on every CPU's primary D-cache, since each
+ * core has its own primary caches.
+ */
+static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+       struct flush_kernel_vmap_range_args args;
+
+       /* vaddr is already unsigned long; the old cast was a leftover. */
+       args.vaddr = vaddr;
+       args.size = size;
+
+       r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+}
+
+
 static inline void rm7k_erratum31(void)
 {
        const unsigned long ic_lsize = 32;
@@ -1403,6 +1436,8 @@ void __cpuinit r4k_cache_init(void)
        flush_cache_page        = r4k_flush_cache_page;
        flush_cache_range       = r4k_flush_cache_range;
 
+       __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
+
        flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
        flush_icache_all        = r4k_flush_icache_all;
        local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
index d352fad3e45101b03191fcdd994adfdb6444f9de..a43c197ccf8c48bd02f27281cba92df44221137b 100644 (file)
@@ -253,6 +253,11 @@ static void tx39_flush_icache_range(unsigned long start, unsigned long end)
        }
 }
 
+/*
+ * Only called via cacheflush.h when cpu_has_dc_aliases; not expected on
+ * TX39 cores, so trap it.
+ * NOTE(review): assumes TX39 has no D-cache aliases -- confirm.
+ */
+static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+       BUG();
+}
+
 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
        unsigned long end;
@@ -394,6 +399,8 @@ void __cpuinit tx39_cache_init(void)
                flush_icache_range = tx39_flush_icache_range;
                local_flush_icache_range = tx39_flush_icache_range;
 
+               __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
+
                flush_cache_sigtramp = tx39_flush_cache_sigtramp;
                local_flush_data_cache_page = local_tx39_flush_data_cache_page;
                flush_data_cache_page = tx39_flush_data_cache_page;
index 12af739048fada0927f50e414f30097b8bbb96d6..829320c7b175372f3695248aee63329f18f7661f 100644 (file)
@@ -35,6 +35,11 @@ void (*local_flush_icache_range)(unsigned long start, unsigned long end);
 void (*__flush_cache_vmap)(void);
 void (*__flush_cache_vunmap)(void);
 
+/* Per-CPU-type hook, installed by the respective *_cache_init() routine. */
+void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
+/*
+ * NOTE(review): __invalidate_kernel_vmap_range is never assigned, exported
+ * or referenced anywhere in this patch (invalidate_kernel_vmap_range in
+ * cacheflush.h reuses __flush_kernel_vmap_range) -- dead declaration?
+ */
+void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
+
+EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+
 /* MIPS specific cache operations */
 void (*flush_cache_sigtramp)(unsigned long addr);
 void (*local_flush_data_cache_page)(void * addr);