arch/s390/boot/vmem.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

unsigned long __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm			(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir		vmlinux.swapper_pg_dir_off
#define invalid_pg_dir		vmlinux.invalid_pg_dir_off

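/*
 * A populate_mode tells pgtable_populate() how the physical address backing
 * a mapping is obtained (see _pa()): no backing address at all, a 1:1
 * (identity) mapping, a mapping of the absolute lowcore, or one of the
 * KASAN shadow population variants.
 */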
enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;

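/* Convert a memory range to its kasan shadow range and populate the latter. */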
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

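/*
 * Populate the kasan shadow: map the shadow of all usable physical memory
 * with fresh memory, shallowly populate the shadow of the vmalloc and module
 * areas when KASAN_VMALLOC is enabled, and back the shadow of the remaining,
 * untracked address space with the shared zero page.
 */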
static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/*
	 * Current memory layout:
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   |     kasan      |
	 * |                 |    /    |   zero page    |
	 * +- vmalloc area  -+   /     |    mapping     |
	 * | vmalloc_size    |  /      |  (untracked)   |
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        |    unmapped    | allocated per module
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   | kasan zero page| (untracked)
	 * |                 |    /    |     mapping    |
	 * +- vmalloc area  -+   /     +----------------+
	 * | vmalloc_size    |  /      |shallow populate|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        |shallow populate|
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 */

	for_each_physmem_usable_range(i, &start, &end)
		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

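/*
 * The kasan_*_populate_zero_shadow() helpers check whether a whole entry at
 * the given page table level can be covered by the shared, read-only early
 * shadow tables. If so, the entry is pointed at the shared table (or the
 * zero shadow pte) and no new memory has to be allocated.
 */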
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	pte_t entry;

	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}

#else

static inline void kasan_populate_shadow(void) {}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte() in the absence of the init_mm symbol. Skip the pmd NULL check though.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

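/*
 * Allocate a crst (region/segment) table from the RR_VMEM physmem reserve
 * and preset all of its entries to the given value.
 */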
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	return table;
}

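/*
 * Allocate a page table. A whole page is requested from physmem and handed
 * out in two halves: the upper half is returned right away, the lower half
 * (pte_leftover) is kept for the next call.
 */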
static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * Handling pte_leftover this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off.
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

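/*
 * Return the physical address to be mapped at virtual address @addr for the
 * given populate mode. POPULATE_KASAN_MAP_SHADOW allocates fresh, zeroed
 * memory of @size bytes; POPULATE_NONE provides no valid backing address.
 */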
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}

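/*
 * Large mappings (1M segment and 2G region entries) may only be used when
 * the machine provides the EDAT1 resp. EDAT2 facility and the range is
 * suitably aligned and sized.
 */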
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat2 &&
	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat1 &&
	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}

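/*
 * The pgtable_*_populate() functions below each walk one level of the page
 * table hierarchy for the range [addr, end), allocating lower level tables
 * as needed. Large pmd/pud entries are used where the EDAT facilities allow
 * it, and the kasan helpers above short-circuit ranges that can be backed by
 * the shared zero shadow.
 */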
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL);
			if (!machine.has_nx)
				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}

static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
				if (!machine.has_nx)
					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}

static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL);
				if (!machine.has_nx)
					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}

static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}

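/*
 * Set up the boot page tables for the decompressed kernel: initialize
 * swapper_pg_dir and invalid_pg_dir, create the identity mapping for the
 * lowcore and all usable physical memory, map the absolute lowcore and the
 * memcpy_real area, populate the kasan shadow, and finally activate the new
 * address spaces via the control registers.
 */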
void setup_vmem(unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	int i;

	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);

	/*
	 * To allow prefixing the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0 first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	kasan_populate_shadow();

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

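	/*
	 * Activate the new tables: the kernel ASCE goes into the primary (CR1)
	 * and home space (CR13) control registers, the invalid user ASCE into
	 * the secondary space control register (CR7).
	 */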
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);

	init_mm.context.asce = S390_lowcore.kernel_asce;
}