/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     kasan (vmalloc/vmemmap ...).
 */
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;

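/*
 * The statically allocated kasan_zero_{p4d,pud,pmd,pte} tables below are
 * shared across the whole zero shadow: each level points down at the next
 * shared table, with the leaf entries mapping kasan_zero_page read-only.
 * The kasan_*_table() helpers check whether a page table entry refers to
 * one of these shared tables rather than to a privately allocated one.
 */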
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_zero_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_zero_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_zero_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_zero_pte));
}

static inline bool kasan_zero_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_zero_page));
}

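/*
 * Allocate a page table page from memblock before the slab allocator is
 * available.  The allocation is naturally aligned and placed, if possible,
 * above MAX_DMA_ADDRESS on the requested node.
 */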
static __init void *early_alloc(size_t size, int node)
{
	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					BOOTMEM_ALLOC_ACCESSIBLE, node);
}

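/*
 * Point every pte covering [addr, end) at the shared zero page.  The
 * mapping is write-protected because the page is shared by all zero
 * shadow mappings.
 */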
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

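/*
 * Populate the pmd entries covering [addr, end).  Aligned PMD-sized blocks
 * reuse the shared kasan_zero_pte table; partial blocks get a private pte
 * page (from slab or memblock, whichever is available) filled with zero
 * page entries.  Returns 0 on success or -ENOMEM.
 */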
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm, addr);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

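/*
 * Same as zero_pmd_populate(), one level up: aligned PUD-sized blocks are
 * pointed at the shared kasan_zero_pmd hierarchy, anything else gets a
 * private pmd page and recurses down.
 */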
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

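/*
 * Same again at the p4d level: aligned P4D-sized blocks reuse the shared
 * kasan_zero_pud hierarchy, anything else gets a private pud page and
 * recurses down.
 */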
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
						lm_alias(kasan_zero_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

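/*
 * Typical use (an illustrative sketch only; the exact calls are
 * architecture specific): the arch's early KASAN setup first maps the
 * whole shadow region to the zero page, e.g.
 *
 *	kasan_populate_zero_shadow(kasan_mem_to_shadow(start),
 *				   kasan_mem_to_shadow(end));
 *
 * and later replaces the parts that shadow real memory with writable,
 * per-range shadow pages.
 */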
/**
 * kasan_populate_zero_shadow - populate shadow memory region with
 *                              kasan_zero_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 *
 * Return: 0 on success, -ENOMEM if a shadow page table could not be
 * allocated.
 */
int __ref kasan_populate_zero_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_zero_pud should already be populated with
			 * pmds at this point.  The pud_populate() and
			 * pmd_populate_kernel() calls below are only needed
			 * for 3- and 2-level page tables, where there are no
			 * real puds/pmds and pgd_populate()/pud_populate()
			 * are no-ops.
			 *
			 * The ifndef is required to avoid build breakage:
			 * with 5level-fixup.h, pgd_populate() is not a no-op
			 * and references kasan_zero_p4d, which is not defined
			 * unless 5-level paging is enabled.
			 *
			 * The ifndef can be dropped once all KASAN-enabled
			 * architectures have switched to pgtable-nop4d.h.
			 */
#ifndef __ARCH_HAS_5LEVEL_HACK
			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
#endif
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

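/*
 * The kasan_free_{pte,pmd,pud,p4d}() helpers free a lower-level table page
 * once every entry in it has been cleared, and then clear the upper-level
 * entry that pointed to it.
 */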
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

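/*
 * The kasan_remove_*_table() helpers walk the shadow page tables and tear
 * down zero shadow mappings: entries pointing at the shared zero tables or
 * the zero page are cleared (shared tables only when the range covers them
 * entirely), while privately allocated tables are walked recursively and
 * freed once empty.
 */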
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (WARN_ON(!kasan_zero_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE))
				pmd_clear(pmd);
			continue;
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE))
				pud_clear(pud);
			continue;
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE))
				p4d_clear(p4d);
			continue;
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

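/**
 * kasan_remove_zero_shadow - unmap the zero shadow backing a memory range
 * @start: start of the original (non-shadow) memory range
 * @size: size of the original memory range
 *
 * Both @start and @size must be aligned to KASAN_SHADOW_SCALE_SIZE *
 * PAGE_SIZE.
 */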
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE))
				pgd_clear(pgd);
			continue;
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

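/**
 * kasan_add_zero_shadow - back a memory range with the zero shadow
 * @start: start of the original (non-shadow) memory range
 * @size: size of the original memory range
 *
 * Both @start and @size must be aligned to KASAN_SHADOW_SCALE_SIZE *
 * PAGE_SIZE.
 *
 * Return: 0 on success, -EINVAL if @start or @size is misaligned, -ENOMEM
 * if shadow page tables could not be allocated.
 */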
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return -EINVAL;

	ret = kasan_populate_zero_shadow(shadow_start, shadow_end);
	if (ret)
		/*
		 * kasan_remove_zero_shadow() expects the original address
		 * and size, not the shadow ones: it does the mem-to-shadow
		 * translation itself.
		 */
		kasan_remove_zero_shadow(start, size);
	return ret;
}