powerpc/mm: Fixup tlbie vs mtpidr/mtlpidr ordering issue on POWER9
[linux-2.6-block.git] / arch/powerpc/include/asm/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#else
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
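
/*
 * Illustrative sketch (not part of the upstream header): why the ptesync
 * above matters. A caller that installs a kernel mapping and then touches
 * it immediately could otherwise see a spurious fault on Book3s, because
 * nothing orders the pte store before the first access. The function name
 * example_map_and_touch() is hypothetical; vmap()/vunmap() are the real
 * <linux/vmalloc.h> interfaces, and vmap() reaches flush_cache_vmap() when
 * it populates the kernel page tables:
 *
 *	static u8 example_map_and_touch(struct page *page)
 *	{
 *		u8 *va = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
 *		u8 first;
 *
 *		// vmap() ends up calling flush_cache_vmap(), whose
 *		// ptesync orders the pte store before this load
 *		first = va[0];
 *		vunmap(va);
 *		return first;
 *	}
 */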

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_cache_shift();
	unsigned long bytes = l1_cache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	if (IS_ENABLED(CONFIG_PPC64)) {
		mb();	/* sync */
		isync();
	}

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbf(addr);
	mb();	/* sync */

	if (IS_ENABLED(CONFIG_PPC64))
		isync();
}
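
/*
 * Illustrative sketch (assumed usage, not code from this file): a typical
 * use of flush_dcache_range() is pushing CPU-written data to memory before
 * a non-coherent device reads it. The function and parameter names below
 * are hypothetical:
 *
 *	static void example_publish_to_device(void *buf, size_t len)
 *	{
 *		unsigned long start = (unsigned long)buf;
 *
 *		// write back and invalidate the lines covering buf, so the
 *		// device observes the data and later CPU reads re-fetch
 *		// from memory
 *		flush_dcache_range(start, start + len);
 *	}
 */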

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_cache_shift();
	unsigned long bytes = l1_cache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);
	mb();	/* sync */
}
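
/*
 * Illustrative sketch (assumed usage): clean_dcache_range() is the cheaper
 * choice when the CPU keeps reading the buffer after the device consumes
 * it, since dirty lines are written back but stay valid in the cache. The
 * names below are hypothetical:
 *
 *	static void example_clean_for_device_read(void *buf, size_t len)
 *	{
 *		unsigned long start = (unsigned long)buf;
 *
 *		// write back dirty lines; no invalidate, so subsequent
 *		// CPU reads of buf still hit the cache
 *		clean_dcache_range(start, start + len);
 *	}
 */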

/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_cache_shift();
	unsigned long bytes = l1_cache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);
	mb();	/* sync */
}
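
/*
 * Illustrative sketch (assumed usage): after a non-snooping device such as
 * the 8xx CPM mentioned above has DMA-written into a buffer, stale cache
 * lines must be discarded before the CPU reads the fresh data. The names
 * below are hypothetical:
 *
 *	static void example_read_from_device(void *buf, size_t len)
 *	{
 *		unsigned long start = (unsigned long)buf;
 *
 *		// drop any stale lines so the next CPU access fetches
 *		// what the device actually wrote to memory
 *		invalidate_dcache_range(start, start + len);
 *	}
 */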

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
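
/*
 * Illustrative sketch (hypothetical caller): copy_to_user_page() pairs the
 * memcpy with an icache flush because the bytes written into the page may
 * be executed, e.g. a debugger planting a breakpoint via an
 * access_process_vm()-style path. maddr and trap_insn are hypothetical:
 *
 *	// write a 4-byte trap instruction into the traced task's page
 *	copy_to_user_page(vma, page, vaddr, maddr, &trap_insn, 4);
 *	// the flush_icache_user_range() inside the macro makes the new
 *	// instruction visible to instruction fetch
 */
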
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */