arch, drivers: replace for_each_memblock() with for_each_mem_range()
arch/arm/mm/pmsa-v8.c
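The conversion applied here is mechanical: open-coded walks over memblock.memory with for_each_memblock() become for_each_mem_range(), which hands back each region's bounds directly. A minimal before/after sketch of the pattern (the "before" body and variable names are illustrative, not the exact old pmsa-v8.c code):

        /* Before: walk struct memblock_region entries and derive the bounds by hand */
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = reg->base + reg->size;
                /* ... use start/end ... */
        }

        /* After: the iterator yields the [start, end) bounds of each range */
        u64 i;
        phys_addr_t start, end;

        for_each_mem_range(i, &start, &end) {
                /* ... use start/end ... */
        }
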
/*
 * Based on linux/arch/arm/pmsa-v7.c
 *
 * ARM PMSAv8 supporting functions.
 */

#include <linux/memblock.h>
#include <linux/range.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>

#include <asm/memory.h>
#include <asm/sections.h>

#include "mm.h"

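/*
 * The MPU region registers come in two flavours: without CONFIG_CPU_V7M they
 * are cp15 system registers (PRSEL/PRBAR/PRLAR), with CONFIG_CPU_V7M the
 * equivalent registers are memory-mapped behind the System Control Block, so
 * all region programming goes through the accessors below.
 */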
#ifndef CONFIG_CPU_V7M

#define PRSEL	__ACCESS_CP15(c6, 0, c2, 1)
#define PRBAR	__ACCESS_CP15(c6, 0, c3, 0)
#define PRLAR	__ACCESS_CP15(c6, 0, c3, 1)

static inline u32 prlar_read(void)
{
        return read_sysreg(PRLAR);
}

static inline u32 prbar_read(void)
{
        return read_sysreg(PRBAR);
}

static inline void prsel_write(u32 v)
{
        write_sysreg(v, PRSEL);
}

static inline void prbar_write(u32 v)
{
        write_sysreg(v, PRBAR);
}

static inline void prlar_write(u32 v)
{
        write_sysreg(v, PRLAR);
}
#else

static inline u32 prlar_read(void)
{
        return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR);
}

static inline u32 prbar_read(void)
{
        return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR);
}

static inline void prsel_write(u32 v)
{
        writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR);
}

static inline void prbar_write(u32 v)
{
        writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR);
}

static inline void prlar_write(u32 v)
{
        writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR);
}

#endif

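/*
 * Scratch ranges used while computing the boot MPU layout: "mem" holds the
 * normal-memory windows, "io" the remaining device space.
 */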
static struct range __initdata io[MPU_MAX_REGIONS];
static struct range __initdata mem[MPU_MAX_REGIONS];

static unsigned int __initdata mpu_max_regions;

static __init bool is_region_fixed(int number)
{
        switch (number) {
        case PMSAv8_XIP_REGION:
        case PMSAv8_KERNEL_REGION:
                return true;
        default:
                return false;
        }
}

void __init pmsav8_adjust_lowmem_bounds(void)
{
        phys_addr_t mem_end;
        phys_addr_t reg_start, reg_end;
        u64 i;

        for_each_mem_range(i, &reg_start, &reg_end) {
                if (i == 0) {
                        phys_addr_t phys_offset = PHYS_OFFSET;

                        /*
                         * Initially only use memory contiguous from
                         * PHYS_OFFSET
                         */
                        if (reg_start != phys_offset)
                                panic("First memory bank must be contiguous from PHYS_OFFSET");
                        mem_end = reg_end;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
                         * all blocks afterwards in one go (we can't remove
                         * blocks separately while iterating)
                         */
                        pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
                                  &mem_end, &reg_start);
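                        /*
                         * Drop everything from reg_start up to the top of the
                         * physical address space: "0 - reg_start" wraps around
                         * to give exactly that size.
                         */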
                        memblock_remove(reg_start, 0 - reg_start);
                        break;
                }
        }
}

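/* Number of MPU regions advertised by MPUIR; read once and cached */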
static int __init __mpu_max_regions(void)
{
        static int max_regions;
        u32 mpuir;

        if (max_regions)
                return max_regions;

        mpuir = read_cpuid_mputype();

        max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

        return max_regions;
}

static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar)
{
        if (number > mpu_max_regions ||
            number >= MPU_MAX_REGIONS)
                return -ENOENT;

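        /*
         * Select the region and make sure the selection has taken effect
         * before the base/limit registers are written.
         */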
        dsb();
        prsel_write(number);
        isb();
        prbar_write(bar);
        prlar_write(lar);

        mpu_rgn_info.rgns[number].prbar = bar;
        mpu_rgn_info.rgns[number].prlar = lar;

        mpu_rgn_info.used++;

        return 0;
}

static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start, phys_addr_t end)
{
        u32 bar, lar;

        if (is_region_fixed(number))
                return -EINVAL;

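        /*
         * PRBAR carries the region base plus access/shareability attributes;
         * PRLAR carries the last address of the region (rounded down to the
         * PMSAv8_MINALIGN granule) plus the attribute index and enable bit.
         */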
        bar = start;
        lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);

        bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
        lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;

        return __pmsav8_setup_region(number, bar, lar);
}

static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start, phys_addr_t end)
{
        u32 bar, lar;

        if (is_region_fixed(number))
                return -EINVAL;

        bar = start;
        lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);

        bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
        lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;

        return __pmsav8_setup_region(number, bar, lar);
}

static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start, phys_addr_t end)
{
        u32 bar, lar;

        if (!is_region_fixed(number))
                return -EINVAL;

        bar = start;
        lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);

        bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
        lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;

        prsel_write(number);
        isb();

        if (prbar_read() != bar || prlar_read() != lar)
                return -EINVAL;

        /* Reserved region was set up early, we just need a record for secondaries */
        mpu_rgn_info.rgns[number].prbar = bar;
        mpu_rgn_info.rgns[number].prlar = lar;

        mpu_rgn_info.used++;

        return 0;
}

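/*
 * A-profile only: the exception vectors page at vectors_base gets its own
 * region; with CONFIG_CPU_V7M the vector table is located via VTOR and no
 * vectors page is mapped.
 */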
#ifndef CONFIG_CPU_V7M
static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start, phys_addr_t end)
{
        u32 bar, lar;

        if (number == PMSAv8_KERNEL_REGION)
                return -EINVAL;

        bar = start;
        lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);

        bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
        lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;

        return __pmsav8_setup_region(number, bar, lar);
}
#endif

void __init pmsav8_setup(void)
{
        int i, err = 0;
        int region = PMSAv8_KERNEL_REGION;

        /* How many regions are supported? */
        mpu_max_regions = __mpu_max_regions();

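        /*
         * Build the candidate windows first: one RAM range and the whole 4G
         * as IO, then subtract everything that is covered by the fixed
         * kernel/XIP/vector regions, and finally program what is left.
         */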
        /* RAM: single chunk of memory */
        add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base,
                  memblock.memory.regions[0].base + memblock.memory.regions[0].size);

        /* IO: cover full 4G range */
        add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff);

        /* RAM and IO: exclude kernel */
        subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));
        subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));

#ifdef CONFIG_XIP_KERNEL
        /* RAM and IO: exclude xip */
        subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
        subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
#endif

#ifndef CONFIG_CPU_V7M
        /* RAM and IO: exclude vectors */
        subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE);
        subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE);
#endif
        /* IO: exclude RAM */
        for (i = 0; i < ARRAY_SIZE(mem); i++)
                subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end);

        /* Now program MPU */

#ifdef CONFIG_XIP_KERNEL
        /* ROM */
        err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
#endif
        /* Kernel */
        err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));

        /* IO */
        for (i = 0; i < ARRAY_SIZE(io); i++) {
                if (!io[i].end)
                        continue;

                err |= pmsav8_setup_io(region++, io[i].start, io[i].end);
        }

        /* RAM */
        for (i = 0; i < ARRAY_SIZE(mem); i++) {
                if (!mem[i].end)
                        continue;

                err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end);
        }

        /* Vectors */
#ifndef CONFIG_CPU_V7M
        err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
#endif
        if (err)
                pr_warn("MPU region initialization failure! %d\n", err);
        else
                pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n",
                        mpu_rgn_info.used, mpu_max_regions);
}