/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/emergency-restart.h>
#include <asm/nmi.h>
/* The BMC sets a bit in this MMR before sending an NMI */
#define UVH_NMI_MMR				UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR			(UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK			(1UL << 63)
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...)	pr_devel("%s: " fmt, __func__, args)
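/*
 * For example, PR_DEVEL("rc %d\n", rc) expands to
 * pr_devel("%s: rc %d\n", __func__, rc); pr_devel() compiles to a
 * no-op unless DEBUG is defined for this file.
 */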
static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
static DEFINE_SPINLOCK(uv_nmi_lock);
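/*
 * Read a hub MMR very early in boot, before the normal MMR mappings are
 * established, via a transient early_ioremap() window.
 */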
static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
	unsigned long val, *mmr;

	mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
	val = *mmr;
	early_iounmap(mmr, sizeof(*mmr));
	return val;
}
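/*
 * The GRU aperture covers a very large physical range; reporting it
 * (like the ISA range) as untracked keeps the PAT/memtype code from
 * tracking every mapping of that space.
 */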
static inline bool is_GRU_range(u64 start, u64 end)
{
	return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
	return is_ISA_range(start, end) || is_GRU_range(start, end);
}
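/*
 * Derive the boot node's pnode from the NODE_ID MMR: the node id is
 * halved (>> 1) and masked to the n_skt address width read from the
 * GAM config MMR.
 */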
static int __init early_get_pnodeid(void)
{
	union uvh_node_id_u node_id;
	union uvh_rh_gam_config_mmr_u m_n_config;
	int pnode;

	/* Currently, all blades have same revision number */
	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
	uv_min_hub_revision_id = node_id.s.revision;

	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
	return pnode;
}
static void __init early_get_apic_pnode_shift(void)
{
	uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
	if (!uvh_apicid.v)
		/*
		 * Old bios, use default value
		 */
		uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}
/*
 * Add an extra bit as dictated by bios to the destination apicid of
 * interrupts potentially passing through the UV HUB.  This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
	union uvh_lb_target_physical_apic_id_mask_u apicid_mask;

	apicid_mask.v = uv_early_read_mmr(UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK);
	uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
}
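/*
 * The OEM table id selects the APIC mode: "UVL" runs the legacy APIC,
 * "UVX" plain x2apic, and "UVH" the non-unique-apicid mode in which the
 * pnode is folded into the upper apicid bits (x2apic_extra_bits).
 */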
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int pnodeid;

	if (!strcmp(oem_id, "SGI")) {
		pnodeid = early_get_pnodeid();
		early_get_apic_pnode_shift();
		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
		x86_platform.nmi_init = uv_nmi_init;
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			__this_cpu_write(x2apic_extra_bits,
				pnodeid << uvh_apicid.s.pnode_shift);
			uv_system_type = UV_NON_UNIQUE_APIC;
			uv_set_apicid_hibit();
			return 1;
		}
	}
	return 0;
}
enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
static const struct cpumask *uv_target_cpus(void)
{
	return cpu_online_mask;
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
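/*
 * Wake a secondary cpu via the hub's UVH_IPI_INT MMR rather than the
 * local APIC ICR: send INIT, wait, then send STARTUP with the vector
 * set to the page number of the trampoline (start_rip >> 12).
 */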
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);

	phys_apicid |= uv_apicid_hibits;

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	mdelay(10);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);
#endif
	return 0;
}
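/*
 * Send a single IPI by resolving the target cpu's apicid and pnode and
 * writing the IPI MMR on the target's hub; the mask/allbutself/all
 * variants below simply iterate this per cpu.
 */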
static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}
static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}
static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
	else
		return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
}
static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __this_cpu_read(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}

static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

static int uv_probe(void)
{
	return apic == &apic_x2apic_uv_x;
}
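/*
 * UV APIC driver ops. The table is matched by uv_probe() /
 * uv_acpi_madt_oem_check() and registered with the generic APIC code by
 * the apic_driver() declaration at the end of this file.
 */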
struct apic __refdata apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= uv_probe,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= uv_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= uv_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= uv_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}
/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}
struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};
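/*
 * Scan the three ALIAS210 overlay/redirect register pairs for an
 * enabled alias of physical address 0 and return the redirect
 * destination and alias size, i.e. where the hardware relocates low
 * memory.
 */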
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}
enum map_type {map_wb, map_uc};
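/*
 * Map one of the high MMR/GRU/MMIOH apertures: the region starts at
 * base << pshift and spans (1UL << bshift) bytes for each of the
 * (max_pnode + 1) pnodes, mapped write-back or uncached per map_type.
 */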
static __init void map_high(char *id, unsigned long base, int pshift,
			int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
						paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable) {
		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
		gru_start_paddr = ((u64)gru.s.base << shift);
		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
	}
}
static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (mmioh.s.enable)
		map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
			max_pnode, map_uc);
}
static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
					&ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}
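/*
 * The fallback above works out to 1000000000000 / 30000 = 33333333,
 * i.e. an assumed ~33.3 MHz RTC when the BIOS reports an unusable
 * frequency.
 */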
/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
static void __cpuinit uv_heartbeat_enable(int cpu)
{
	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;

		/* also ensure that boot cpu is enabled */
		cpu = 0;
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}
/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}
#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */
/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	int domain, bus, rc;

	PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
			pdev->devfn, decode, command_bits, change_bridge);

	if (!change_bridge)
		return 0;

	if ((command_bits & PCI_COMMAND_IO) == 0)
		return 0;

	domain = pci_domain_nr(pdev->bus);
	bus = pdev->bus->number;

	rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
	PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

	return rc;
}
/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}
/*
 * When NMI is received, print a stack trace.
 */
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
{
	unsigned long real_uv_nmi;
	int bid;

	if (reason != DIE_NMIUNKNOWN)
		return NOTIFY_OK;

	if (in_crash_kexec)
		/* do nothing if entering the crash kernel */
		return NOTIFY_OK;

	/*
	 * Each blade has an MMR that indicates when an NMI has been sent
	 * to cpus on the blade. If an NMI is detected, atomically
	 * clear the MMR and update a per-blade NMI count used to
	 * cause each cpu on the blade to notice a new NMI.
	 */
	bid = uv_numa_blade_id();
	real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

	if (unlikely(real_uv_nmi)) {
		spin_lock(&uv_blade_info[bid].nmi_lock);
		real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
		if (real_uv_nmi) {
			uv_blade_info[bid].nmi_count++;
			uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
		}
		spin_unlock(&uv_blade_info[bid].nmi_lock);
	}

	if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
		return NOTIFY_DONE;

	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

	/*
	 * Use a lock so only one cpu prints at a time.
	 * This prevents intermixed output.
	 */
	spin_lock(&uv_nmi_lock);
	pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	spin_unlock(&uv_nmi_lock);

	return NOTIFY_STOP;
}
static struct notifier_block uv_dump_stack_nmi_nb = {
	.notifier_call	= uv_handle_nmi,
	.priority	= NMI_LOCAL_LOW_PRIOR - 1,
};

void uv_register_nmi_notifier(void)
{
	if (register_die_notifier(&uv_dump_stack_nmi_nb))
		printk(KERN_WARNING "UV NMI handler failed to register\n");
}
void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all cpus
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}
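/*
 * Top-level UV setup: size the system from the hub config MMRs,
 * allocate the blade/node/cpu translation tables, fill the per-cpu hub
 * info, map the GRU/MMR/MMIOH apertures, and register the heartbeat,
 * NMI and VGA handlers.
 */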
void __init uv_system_init(void)
{
	union uvh_rh_gam_config_mmr_u m_n_config;
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
	int gnode_extra, max_pnode = 0;
	unsigned long mmr_base, present, paddr;
	unsigned short pnode_mask, pnode_io_mask;

	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	n_io = mmioh.s.n_io;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	pnode_mask = (1 << n_val) - 1;
	pnode_io_mask = (1 << n_io) - 1;

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
	gnode_upper = ((unsigned long)gnode_extra << m_val);
	printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
			n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);

	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		  hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_blade_info);

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		uv_blade_info[blade].memory_nid = -1;

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_node_to_blade);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_cpu_to_blade);
	memset(uv_cpu_to_blade, 255, bytes);

	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			pnode = (i * 64 + j) & pnode_mask;
			uv_blade_info[blade].pnode = pnode;
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			spin_lock_init(&uv_blade_info[blade].nmi_lock);
			max_pnode = max(pnode, max_pnode);
			blade++;
		}
	}

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
			    &sn_region_size, &system_serial_number);
	uv_rtc_init();
	for_each_present_cpu(cpu) {
		int apicid = per_cpu(x86_cpu_to_apicid, cpu);

		nid = cpu_to_node(cpu);
		/*
		 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
		 */
		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
		uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
		pnode = uv_apicid_to_pnode(apicid);
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		/* Any node on the blade, else will contain -1. */
		uv_blade_info[blade].memory_nid = nid;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
	}
	/* Add blade/pnode info for nodes without cpus */
	for_each_online_node(nid) {
		if (uv_node_to_blade[nid] >= 0)
			continue;
		paddr = node_start_pfn(nid) << PAGE_SHIFT;
		paddr = uv_soc_phys_ram_to_gpa(paddr);
		pnode = (paddr >> m_val) & pnode_mask;
		blade = boot_pnode_to_blade(pnode);
		uv_node_to_blade[nid] = blade;
	}
	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_mmioh_high(max_pnode & pnode_io_mask);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	uv_register_nmi_notifier();
	proc_mkdir("sgi_uv", NULL);

	/* register Legacy VGA I/O redirection handler */
	pci_register_set_vga_state(uv_set_vga_state);

	/*
	 * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
	 * EFI is not enabled in the kdump kernel.
	 */
	if (is_kdump_kernel())
		reboot_type = BOOT_ACPI;
}

apic_driver(apic_x2apic_uv_x);