/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/lmb.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/phyp_dump.h>
#include <asm/kexec.h>
#include <mm/mmu_decl.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif
#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
#endif
static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);
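/*
 * Example (illustrative): booting with "mem=512M" on the kernel command
 * line caps usable memory at the first 512MB; PAGE_ALIGN() above rounds
 * the parsed value up to a page boundary before it is stored in
 * memory_limit.
 */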
/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump. If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = be32_to_cpu(initial_boot_params->totalsize);

	if ((memory_limit && (start + size) > memory_limit) ||
			overlaps_crashkernel(start, size)) {
		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
		memcpy(p, initial_boot_params, size);
		initial_boot_params = (struct boot_param_header *)p;
		DBG("Moved device tree to 0x%p\n", p);
	}

	DBG("<- move_device_tree\n");
}
/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
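/*
 * Illustrative example (hypothetical property contents, not from PAPR):
 * a descriptor of { 0x02, 0x00, 0xC0, 0x80 } declares two attribute
 * bytes of type 0.  Big-endian bits 0 (MMU, mask 0x80) and 1 (FPU,
 * mask 0x40) of byte 0 are set in 0xC0, so those features are enabled;
 * bit 1 of byte 1 (mask 0x40) is clear in 0x80, so the NODSISRALIGN
 * entry below (invert = 1) would also set its feature.
 */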
static struct ibm_pa_feature {
	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned char	pabyte;		/* byte number in ibm,pa-features */
	unsigned char	pabit;		/* bit number (big-endian) */
	unsigned char	invert;		/* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
	{0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
	{0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
	{CPU_FTR_SLB, 0,		0, 2, 0},
	{CPU_FTR_CTRL, 0,		0, 3, 0},
	{CPU_FTR_NOEXECUTE, 0,		0, 6, 0},
	{CPU_FTR_NODSISRALIGN, 0,	1, 1, 1},
	{CPU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
static void __init scan_features(unsigned long node, unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_pa_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
		if (fp->pabyte >= ftrs[0])
			continue;
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
		}
	}
}
static void __init check_cpu_pa_features(unsigned long node)
{
	unsigned char *pa_ftrs;
	unsigned long tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen,
		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
	u32 *slb_size_ptr;

	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
	if (slb_size_ptr != NULL) {
		mmu_slb_size = *slb_size_ptr;
		return;
	}
	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
	if (slb_size_ptr != NULL) {
		mmu_slb_size = *slb_size_ptr;
	}
}
#else
#define check_cpu_slb_size(node) do { } while(0)
#endif
static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static inline void identical_pvr_fixup(unsigned long node)
{
	unsigned int pvr;
	char *model = of_get_flat_dt_prop(node, "model", NULL);

	/*
	 * Since 440GR(x)/440EP(x) processors have the same pvr,
	 * we check the node path and set bit 28 in the cur_cpu_spec
	 * pvr for EP(x) processor version. This bit is always 0 in
	 * the "real" pvr. Then we call identify_cpu again with
	 * the new logical pvr to enable FPU support.
	 */
	if (model && strstr(model, "440EP")) {
		pvr = cur_cpu_spec->pvr_value | 0x8;
		identify_cpu(0, pvr);
		DBG("Using logical pvr %x for %s\n", pvr, model);
	}
}
#else
#define identical_pvr_fixup(node) do { } while(0)
#endif
static void __init check_cpu_feature_properties(unsigned long node)
{
	unsigned long i;
	struct feature_property *fp = feature_properties;
	const u32 *prop;

	for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && *prop >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}
static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	static int logical_cpuid = 0;
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *prop;
	const u32 *intserv;
	int i, nthreads;
	unsigned long len;
	int found = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (intserv) {
		nthreads = len / sizeof(int);
	} else {
		intserv = of_get_flat_dt_prop(node, "reg", NULL);
		nthreads = 1;
	}

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * version 2 of the kexec param format adds the phys cpuid of
		 * booted proc.
		 */
		if (initial_boot_params && initial_boot_params->version >= 2) {
			if (intserv[i] ==
					initial_boot_params->boot_cpuid_phys) {
				found = 1;
				break;
			}
		} else {
			/*
			 * Check if it's the boot-cpu, set its hw index now,
			 * unfortunately this format did not support booting
			 * off secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
				found = 1;
				break;
			}
		}

#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		logical_cpuid++;
#endif
	}

	if (found) {
		DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
			intserv[i]);
		boot_cpuid = logical_cpuid;
		set_hard_smp_processor_id(boot_cpuid, intserv[i]);

		/*
		 * PAPR defines "logical" PVR values for cpus that
		 * meet various levels of the architecture:
		 * 0x0f000001	Architecture version 2.04
		 * 0x0f000002	Architecture version 2.05
		 * If the cpu-version property in the cpu node contains
		 * such a value, we call identify_cpu again with the
		 * logical PVR value in order to use the cpu feature
		 * bits appropriate for the architecture level.
		 *
		 * A POWER6 partition in "POWER6 architected" mode
		 * uses the 0x0f000002 PVR value; in POWER5+ mode
		 * it uses 0x0f000001.
		 */
		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
		if (prop && (*prop & 0xff000000) == 0x0f000000)
			identify_cpu(0, *prop);

		identical_pvr_fixup(node);
	}

	check_cpu_feature_properties(node);
	check_cpu_pa_features(node);
	check_cpu_slb_size(node);

#ifdef CONFIG_PPC_PSERIES
	if (nthreads > 1)
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	else
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
#endif

	return 0;
}
void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
	unsigned long *lprop;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC
	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif
}
#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm,dynamic-memory property in the
 * /ibm,dynamic-reconfiguration-memory node.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
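/*
 * Layout sketch, as parsed below (one cell == one __be32): the property
 * begins with a single cell holding the number of entries; each entry
 * is dt_root_addr_cells cells of base address, followed by four cells
 * holding the DRC index, a pad/reserved word, the associativity list
 * index, and the flags word.
 */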
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
	__be32 *dm, *ls, *usm;
	unsigned long l, n, flags;
	u64 base, size, lmb_size;
	unsigned int is_kexec_kdump = 0, rngs;

	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
		return 0;
	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);

	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
	if (dm == NULL || l < sizeof(__be32))
		return 0;

	n = *dm++;	/* number of entries */
	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
		return 0;

	/* check if this is a kexec/kdump kernel. */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
						 &l);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		base = dt_mem_next_cell(dt_root_addr_cells, &dm);
		flags = dm[3];
		/* skip DRC index, pad, assoc. list index, flags */
		dm += 4;
		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		size = lmb_size;
		rngs = 1;
		if (is_kexec_kdump) {
			/*
			 * For each lmb in ibm,dynamic-memory, a corresponding
			 * entry in the linux,drconf-usable-memory property
			 * contains a counter 'p' followed by 'p' (base, size)
			 * tuples. Now read the counter from
			 * linux,drconf-usable-memory.
			 */
			rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
			if (!rngs) /* there are no (base, size) tuples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = dt_mem_next_cell(dt_root_addr_cells,
							 &usm);
				size = dt_mem_next_cell(dt_root_size_cells,
							 &usm);
			}
			if (iommu_is_off) {
				if (base >= 0x80000000ul)
					continue;
				if ((base + size) > 0x80000000ul)
					size = 0x80000000ul - base;
			}
			lmb_add(base, size);
		} while (--rngs);
	}
	lmb_dump_all();
	return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node)	0
#endif /* CONFIG_PPC_PSERIES */
static int __init early_init_dt_scan_memory_ppc(unsigned long node,
						const char *uname,
						int depth, void *data)
{
	if (depth == 1 &&
	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);

	return early_init_dt_scan_memory(node, uname, depth, data);
}
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#if defined(CONFIG_PPC64)
	if (iommu_is_off) {
		if (base >= 0x80000000ul)
			return;
		if ((base + size) > 0x80000000ul)
			size = 0x80000000ul - base;
	}
#endif

	lmb_add(base, size);

	memstart_addr = min((u64)memstart_addr, base);
}
u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return lmb_alloc(size, align);
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init early_init_dt_setup_initrd_arch(unsigned long start,
		unsigned long end)
{
	initrd_start = (unsigned long)__va(start);
	initrd_end = (unsigned long)__va(end);
	initrd_below_start_ok = 1;
}
#endif
static void __init early_reserve_mem(void)
{
	u64 base, size;
	u64 *reserve_map;
	unsigned long self_base;
	unsigned long self_size;

	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
					initial_boot_params->off_mem_rsvmap);

	/* before we do anything, lets reserve the dt blob */
	self_base = __pa((unsigned long)initial_boot_params);
	self_size = initial_boot_params->totalsize;
	lmb_reserve(self_base, self_size);

#ifdef CONFIG_BLK_DEV_INITRD
	/* then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start))
		lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values
	 */
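	/*
	 * Layout note: the reserve map is normally pairs of 64-bit
	 * { base, size } cells terminated by a zero-size entry, e.g.
	 * { base0, size0, base1, size1, ..., 0, 0 }; the 32-bit case
	 * below walks the same layout with u32 cells instead.
	 */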
	if (*reserve_map > 0xffffffffull) {
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
			if (size_32 == 0)
				break;
			/* skip if the reservation is for the blob */
			if (base_32 == self_base && size_32 == self_size)
				continue;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			lmb_reserve(base_32, size_32);
		}
		return;
	}
#endif
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
		if (size == 0)
			break;
		DBG("reserving: %llx -> %llx\n", base, size);
		lmb_reserve(base, size);
	}
}
#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
 *
 * Function to find the largest size we need to reserve
 * during the early boot process.
 *
 * It either looks for a boot parameter and returns that, OR
 * returns the larger of 256MB or 5% of DRAM, rounded down
 * to a multiple of 256MB.
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
	unsigned long tmp;

	if (phyp_dump_info->reserve_bootvar)
		return phyp_dump_info->reserve_bootvar;

	/* divide by 20 to get 5% of value */
	tmp = lmb_end_of_DRAM();
	do_div(tmp, 20);

	/* round it down to multiples of 256MB */
	tmp = tmp & ~0x0FFFFFFFUL;

	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}
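/*
 * Worked example (illustrative): with 32GB of DRAM and no boot
 * parameter, 5% is roughly 1.6GB; masking with ~0x0FFFFFFF rounds that
 * down to 1.5GB (six 256MB chunks), which exceeds PHYP_DUMP_RMR_END
 * (256MB) and so becomes the reserve size.
 */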
/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in the last
 * boot instance, or if the hardware is supported and the
 * scratch area needs to be set up. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
	unsigned long base, size;
	unsigned long variable_reserve_size;

	if (!phyp_dump_info->phyp_dump_configured) {
		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
		return;
	}

	if (!phyp_dump_info->phyp_dump_at_boot) {
		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
		return;
	}

	variable_reserve_size = phyp_dump_calculate_reserve_size();

	if (phyp_dump_info->phyp_dump_is_active) {
		/* Reserve *everything* above RMR. Area freed by userland tools. */
		base = variable_reserve_size;
		size = lmb_end_of_DRAM() - base;

		/* XXX crashed_ram_end is wrong, since it may be beyond
		 * the memory_limit, it will need to be adjusted. */
		lmb_reserve(base, size);

		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	} else {
		size = phyp_dump_info->cpu_state_size +
			phyp_dump_info->hpte_region_size +
			variable_reserve_size;
		base = lmb_end_of_DRAM() - size;
		lmb_reserve(base, size);
		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	}
}
#else
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */
void __init early_init_devtree(void *params)
{
	phys_addr_t limit;

	DBG(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PHYP_DUMP
	/* scan tree to see if a dump occurred during the last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
	/* If relocatable, reserve first 32k for interrupt vectors etc. */
	if (PHYSICAL_START > MEMORY_START)
		lmb_reserve(MEMORY_START, 0x8000);
	reserve_kdump_trampoline();
	reserve_crashkernel();
	early_reserve_mem();
	phyp_dump_reserve_mem();

	limit = memory_limit;
	if (!limit) {
		phys_addr_t memsize;

		/* Ensure that total memory size is page-aligned, because
		 * otherwise mark_bootmem() gets upset. */
		lmb_analyze();
		memsize = lmb_phys_mem_size();
		if ((memsize & PAGE_MASK) != memsize)
			limit = memsize & PAGE_MASK;
	}
	lmb_enforce_memory_limit(limit);

	lmb_analyze();
	lmb_dump_all();

	DBG("Phys. mem: %llx\n", lmb_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}
/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/
/**
 *	of_find_next_cache_node - Find a node's subsidiary cache
 *	@np:	node of type "cpu" or "cache"
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.  Caller should hold a reference
 *	to np.
 */
struct device_node *of_find_next_cache_node(struct device_node *np)
{
	struct device_node *child;
	const phandle *handle;

	handle = of_get_property(np, "l2-cache", NULL);
	if (!handle)
		handle = of_get_property(np, "next-level-cache", NULL);

	if (handle)
		return of_find_node_by_phandle(*handle);

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (!strcmp(np->type, "cpu"))
		for_each_child_of_node(np, child)
			if (!strcmp(child->type, "cache"))
				return child;

	return NULL;
}
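/*
 * Usage sketch (illustrative, cpu_np is a hypothetical caller-held
 * node): walk the cache hierarchy below a cpu node, dropping each
 * reference as we go:
 *
 *	struct device_node *cache = of_find_next_cache_node(cpu_np);
 *	while (cache) {
 *		struct device_node *next = of_find_next_cache_node(cache);
 *		... inspect properties such as "d-cache-size" here ...
 *		of_node_put(cache);
 *		cache = next;
 *	}
 */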
#ifdef CONFIG_PPC_PSERIES
/*
 * Fix up the uninitialized fields in a new device node:
 * name, type and pci-specific fields
 */

static int of_finish_dynamic_node(struct device_node *node)
{
	struct device_node *parent = of_get_parent(node);
	int err = 0;
	const phandle *ibm_phandle;

	node->name = of_get_property(node, "name", NULL);
	node->type = of_get_property(node, "device_type", NULL);

	if (!node->name)
		node->name = "<NULL>";
	if (!node->type)
		node->type = "<NULL>";

	if (!parent) {
		err = -ENODEV;
		goto out;
	}

	/* We don't support that function on PowerMac, at least
	 * not yet
	 */
	if (machine_is(powermac))
		return -ENODEV;

	/* fix up new node's phandle field */
	if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
		node->phandle = *ibm_phandle;

out:
	of_node_put(parent);
	return err;
}
static int prom_reconfig_notifier(struct notifier_block *nb,
				  unsigned long action, void *node)
{
	int err;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		err = of_finish_dynamic_node(node);
		if (err < 0) {
			printk(KERN_ERR "finish_node returned %d\n", err);
			err = NOTIFY_BAD;
		}
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};

static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
#endif
/* Find the device node for a given logical cpu number, also returns the cpu
 * local thread number (index in ibm,ppc-interrupt-server#s) if relevant and
 * asked for (non NULL)
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	int hardid;
	struct device_node *np;

	hardid = get_hard_smp_processor_id(cpu);

	for_each_node_by_type(np, "cpu") {
		const u32 *intserv;
		unsigned int plen, t;

		/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
		 * fallback to "reg" property and assume no threads
		 */
		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
				&plen);
		if (intserv == NULL) {
			const u32 *reg = of_get_property(np, "reg", NULL);
			if (reg == NULL)
				continue;
			if (*reg == hardid) {
				if (thread)
					*thread = 0;
				return np;
			}
		} else {
			plen /= sizeof(u32);
			for (t = 0; t < plen; t++) {
				if (hardid == intserv[t]) {
					if (thread)
						*thread = t;
					return np;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
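/*
 * Usage sketch (illustrative): look up the node for logical cpu 0 and
 * its thread index, remembering to drop the reference afterwards:
 *
 *	unsigned int thread;
 *	struct device_node *np = of_get_cpu_node(0, &thread);
 *	if (np) {
 *		... use np, e.g. read its "clock-frequency" property ...
 *		of_node_put(np);
 *	}
 */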
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
{
	struct dentry *d;

	flat_dt_blob.data = initial_boot_params;
	flat_dt_blob.size = initial_boot_params->totalsize;

	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
				powerpc_debugfs_root, &flat_dt_blob);
	if (!d)
		return 1;

	return 0;
}
__initcall(export_flat_device_tree);
#endif