powerpc/mm: Pass node id into create_section_mapping
arch/powerpc/mm/mem.c (linux-2.6-block.git)

/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

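/*
 * Return 1 if the given page frame number is system RAM. On 64-bit we
 * consult the memblock memory regions; 32-bit simply compares against
 * max_pfn.
 */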
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

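/*
 * Pick the page protection for a physical-memory mapping (e.g. /dev/mem):
 * defer to the platform hook when one is registered, otherwise map
 * anything that is not RAM as non-cached.
 */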
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

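/*
 * Weak fallbacks: an MMU backend that supports memory hotplug is expected
 * to provide real implementations. The node id lets the backend allocate
 * its page table memory on the node being added. A minimal sketch of an
 * override, assuming a hash-MMU style bolted-mapping helper:
 *
 *	int create_section_mapping(unsigned long start, unsigned long end,
 *				   int nid)
 *	{
 *		return htab_bolt_mapping(start, end, __pa(start),
 *					 pgprot_val(PAGE_KERNEL),
 *					 mmu_linear_psize, mmu_kernel_ssize);
 *	}
 */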
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

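/*
 * Hotplug entry point: grow the hash page table if necessary, create the
 * linear mapping for the new range (passing nid so backing page tables
 * can be allocated node-locally), then hand the pages to generic mm.
 */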
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		    bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
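/*
 * The inverse of arch_add_memory(): return the pages to generic mm,
 * tear down the linear mapping, and let the hash page table shrink again.
 */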
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures. Walk through the
 * memory regions, find holes and invoke the callback for each contiguous
 * region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

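/*
 * Example usage (hypothetical caller, for illustration only): count the
 * RAM pages backing a pfn range, skipping any holes. The callback gets
 * the start pfn and length of each contiguous RAM region; a non-zero
 * return value stops the walk.
 *
 *	static int count_ram_pages(unsigned long start_pfn,
 *				   unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
 */
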
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

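/*
 * Usage sketch (hypothetical platform code): a board whose DMA engines
 * can only reach the low 2GB would clamp the DMA zone before
 * paging_init() runs:
 *
 *	limit_zone_pfn(ZONE_DMA, 0x80000000UL >> PAGE_SHIFT);
 *
 * and a driver can later map a device's addressing limit back to a zone:
 *
 *	int zone = dma_pfn_limit_to_zone(dev_pfn_limit);
 */
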
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, 0);	/* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

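/*
 * Final memory-management bring-up: hand boot memory to the buddy
 * allocator, give back any non-reserved highmem pages, and report the
 * kernel's virtual memory layout on 32-bit.
 */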
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

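/*
 * Flush the data cache to memory and invalidate the corresponding
 * i-cache lines for a page, so freshly written text is visible to
 * instruction fetch. Depending on the platform this goes through a
 * temporary kmap_atomic() mapping or directly by physical address.
 */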
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 *	- Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 *	- Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:	/* data storage interrupt: plain read/write access */
		access = 0UL;
		break;
	case 0x400:	/* instruction storage interrupt: exec access */
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

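/*
 * The resulting entries look like this (illustrative values only):
 *
 *	# cat /proc/iomem
 *	00000000-7fffffff : System RAM
 */
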
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */