arch/powerpc/include/asm/cache.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__

/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#define IFETCH_ALIGN_SHIFT	2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3	/* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#define IFETCH_ALIGN_SHIFT	4	/* POWER8,9 */
#endif

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
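/* e.g. the 64-bit default of L1_CACHE_SHIFT = 7 gives 128-byte cache lines */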

#define SMP_CACHE_BYTES		L1_CACHE_BYTES

#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)

#if !defined(__ASSEMBLY__)
#ifdef CONFIG_PPC64

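/*
 * Geometry of one level of the cache hierarchy; on 64-bit these fields
 * are filled in at boot from the device tree.
 */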
struct ppc_cache_info {
	u32 size;
	u32 line_size;
	u32 block_size;	/* L1 only */
	u32 log_block_size;
	u32 blocks_per_page;
	u32 sets;
	u32 assoc;
};

struct ppc64_caches {
	struct ppc_cache_info l1d;
	struct ppc_cache_info l1i;
	struct ppc_cache_info l2;
	struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;

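/*
 * On 64-bit the L1 data cache block size is discovered at runtime and
 * kept in ppc64_caches; these accessors hide the difference from the
 * 32-bit case below, where the size is a compile-time constant.
 */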
static inline u32 l1_cache_shift(void)
{
	return ppc64_caches.l1d.log_block_size;
}

static inline u32 l1_cache_bytes(void)
{
	return ppc64_caches.l1d.block_size;
}
#else
static inline u32 l1_cache_shift(void)
{
	return L1_CACHE_SHIFT;
}

static inline u32 l1_cache_bytes(void)
{
	return L1_CACHE_BYTES;
}
#endif
#endif /* ! __ASSEMBLY__ */

#if defined(__ASSEMBLY__)
/*
 * For a snooping icache, we still need a dummy icbi to purge all the
 * prefetched instructions from the ifetch buffers. We also need a sync
 * before the icbi to order the actual stores to memory that might
 * have modified instructions with the icbi.
 */
#define PURGE_PREFETCHED_INS	\
	sync;			\
	icbi	0,r3;		\
	sync;			\
	isync
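
/*
 * Note (illustrative, not from the original header): the icbi above
 * operates on 0(r3), so assembly callers are expected to have the
 * target address in r3 before invoking PURGE_PREFETCHED_INS.
 */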

#else
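/*
 * Data marked __read_mostly is grouped into its own section so that
 * rarely written variables do not share cache lines with frequently
 * written ones.
 */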
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

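/*
 * L2CR/L3CR cache control register helpers: real implementations exist
 * only for Book3S 32-bit parts; everywhere else they are no-op stubs.
 */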
#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

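/* dcbz: establish and zero the entire data cache block containing addr */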
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

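/* dcbi: invalidate the data cache block containing addr without writing it back */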
static inline void dcbi(void *addr)
{
	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

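/* dcbf: write the data cache block back to memory (if dirty) and invalidate it */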
static inline void dcbf(void *addr)
{
	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

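/* dcbst: write the data cache block back to memory but leave it valid in the cache */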
static inline void dcbst(void *addr)
{
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
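
/*
 * Illustrative sketch, not part of the original header: how a caller
 * could clean a byte range back to memory by walking it one L1 cache
 * block at a time with dcbst(). The helper name is hypothetical; the
 * kernel's real flush/clean helpers are provided elsewhere
 * (asm/cacheflush.h).
 */
static inline void example_clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long bytes = l1_cache_bytes();
	unsigned long addr;

	/* Round down so the first partially covered block is included. */
	for (addr = start & ~(bytes - 1); addr < stop; addr += bytes)
		dcbst((void *)addr);	/* write back each dirty block */

	/* Order the write-backs before any subsequent accesses. */
	__asm__ __volatile__ ("sync" : : : "memory");
}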
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */