x86, apic: clean up ->cpu_present_to_apicid()
linux-2.6-block.git: arch/x86/kernel/genx2apic_uv_x.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/delay.h>	/* for mdelay() in uv_wakeup_secondary() */
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/pgtable.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>

DEFINE_PER_CPU(int, x2apic_extra_bits);

static enum uv_system_type uv_system_type;

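/*
 * Set the UV mode from the ACPI MADT OEM ids: "UVL" selects legacy APIC
 * mode, "UVX" x2apic mode and "UVH" the hubbed non-unique-APIC mode.
 * Only the "UVH" case returns 1, i.e. only then is this genapic selected
 * directly from the MADT check.
 */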
static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        if (!strcmp(oem_id, "SGI")) {
                if (!strcmp(oem_table_id, "UVL"))
                        uv_system_type = UV_LEGACY_APIC;
                else if (!strcmp(oem_table_id, "UVX"))
                        uv_system_type = UV_X2APIC;
                else if (!strcmp(oem_table_id, "UVH")) {
                        uv_system_type = UV_NON_UNIQUE_APIC;
                        return 1;
                }
        }
        return 0;
}

enum uv_system_type get_uv_system_type(void)
{
        return uv_system_type;
}

int is_uv_system(void)
{
        return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */

static const struct cpumask *uv_target_cpus(void)
{
        return cpumask_of(0);
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

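/*
 * Wake a secondary cpu by writing an INIT and then a STARTUP request to
 * the target's UVH_IPI_INT MMR.  The SIPI vector is the 4K page number of
 * start_rip, which is what the shift-then->>12 below places in the vector
 * field.
 */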
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
{
        unsigned long val;
        int pnode;

        pnode = uv_apicid_to_pnode(phys_apicid);
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_INIT;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
        mdelay(10);

        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_STARTUP;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
        return 0;
}

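/*
 * Send one IPI by writing the UVH_IPI_INT MMR on the target's home pnode.
 * Only the low 6 bits of the apicid (the hub-local APIC id) go into the
 * APIC_ID field; the pnode selects which hub's MMR gets written.
 */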
static void uv_send_IPI_one(int cpu, int vector)
{
        unsigned long val, apicid, lapicid;
        int pnode;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        lapicid = apicid & 0x3f;                /* ZZZ macro needed */
        pnode = uv_apicid_to_pnode(apicid);
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (lapicid << UVH_IPI_INT_APIC_ID_SHFT) |
            (vector << UVH_IPI_INT_VECTOR_SHFT);
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();

        for_each_cpu(cpu, mask)
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_allbutself(int vector)
{
        unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();

        for_each_online_cpu(cpu)
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_all(int vector)
{
        uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
        return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        cpu = cpumask_first(cpumask);
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                              const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask)
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        return BAD_APICID;
}

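/*
 * On UV_NON_UNIQUE_APIC systems the hardware APIC id alone is not unique
 * across the machine, so the pnode bits cached in x2apic_extra_bits (set
 * by set_x2apic_extra_bits() below) are OR-ed in to form a unique id.
 */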
static unsigned int get_apic_id(unsigned long x)
{
        unsigned int id;

        WARN_ON(preemptible() && num_online_cpus() > 1);
        id = x | __get_cpu_var(x2apic_extra_bits);

        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        /* maskout x2apic_extra_bits ? */
        x = id;
        return x;
}

static unsigned int uv_read_apic_id(void)
{
        return get_apic_id(apic_read(APIC_ID));
}

static unsigned int phys_pkg_id(int index_msb)
{
        return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}

struct genapic apic_x2apic_uv_x = {
        .name                           = "UV large system",
        .probe                          = NULL,
        .acpi_madt_oem_check            = uv_acpi_madt_oem_check,
        .apic_id_registered             = uv_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = uv_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = uv_vector_allocation_domain,
        .init_apic_ldr                  = uv_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = NULL,
        .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = NULL,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFFFFFFFu,

        .cpu_mask_to_apicid             = uv_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = uv_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = uv_send_IPI_mask,
        .send_IPI_mask_allbutself       = uv_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = uv_send_IPI_allbutself,
        .send_IPI_all                   = uv_send_IPI_all,
        .send_IPI_self                  = uv_send_IPI_self,

        .wakeup_cpu                     = NULL,
        .trampoline_phys_low            = 0,
        .trampoline_phys_high           = 0,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .store_NMI_vector               = NULL,
        .restore_NMI_vector             = NULL,
        .inquire_remote_apic            = NULL,
};

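/*
 * Record this cpu's pnode in the bits above the 6-bit hub-local APIC id,
 * so that get_apic_id() can reconstruct a machine-unique id.
 */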
static __cpuinit void set_x2apic_extra_bits(int pnode)
{
        __get_cpu_var(x2apic_extra_bits) = (pnode << 6);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
        int blade;

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                if (pnode == uv_blade_info[blade].pnode)
                        return blade;
        BUG();
}

struct redir_addr {
        unsigned long redirect;
        unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};

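/*
 * Find the alias/overlay pair that remaps low memory (base 0) and return
 * the redirected base address and the size of the aliased region.
 */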
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
        union uvh_si_alias0_overlay_config_u alias;
        union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
        int i;

        for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
                alias.v = uv_read_local_mmr(redir_addrs[i].alias);
                if (alias.s.base == 0) {
                        *size = (1UL << alias.s.m_alias);
                        redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
                        *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
                        return;
                }
        }
        BUG();
}

static __init void map_low_mmrs(void)
{
        init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
        init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

enum map_type {map_wb, map_uc};

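/*
 * Map a (1UL << shift) sized window per pnode, starting at base << shift,
 * either write-back cached (map_wb) or uncached (map_uc).
 */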
static __init void map_high(char *id, unsigned long base, int shift,
                            int max_pnode, enum map_type map_type)
{
        unsigned long bytes, paddr;

        paddr = base << shift;
        bytes = (1UL << shift) * (max_pnode + 1);
        printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
                                                paddr + bytes);
        if (map_type == map_uc)
                init_extra_mapping_uc(paddr, bytes);
        else
                init_extra_mapping_wb(paddr, bytes);
}

static __init void map_gru_high(int max_pnode)
{
        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
        int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

        gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
        if (gru.s.enable)
                map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
}

static __init void map_config_high(int max_pnode)
{
        union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
        int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;

        cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
        if (cfg.s.enable)
                map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmr_high(int max_pnode)
{
        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
        if (mmr.s.enable)
                map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

        mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
        if (mmioh.s.enable)
                map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
}

static __init void uv_rtc_init(void)
{
        long status;
        u64 ticks_per_sec;

        status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
                                        &ticks_per_sec);
        if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
                printk(KERN_WARNING
                        "unable to determine platform RTC clock frequency, "
                        "guessing.\n");
                /* BIOS gives wrong value for clock freq. so guess */
                sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
        } else
                sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
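/*
 * Each cpu periodically flips its heartbeat bit and publishes an
 * idle/active bit in its System Controller Interface Register (SCIR),
 * letting the system controller monitor cpu liveness.
 */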
static void uv_heartbeat(unsigned long ignored)
{
        struct timer_list *timer = &uv_hub_info->scir.timer;
        unsigned char bits = uv_hub_info->scir.state;

        /* flip heartbeat bit */
        bits ^= SCIR_CPU_HEARTBEAT;

        /* is this cpu idle? */
        if (idle_cpu(raw_smp_processor_id()))
                bits &= ~SCIR_CPU_ACTIVITY;
        else
                bits |= SCIR_CPU_ACTIVITY;

        /* update system controller interface reg */
        uv_set_scir_bits(bits);

        /* enable next timer period */
        mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void __cpuinit uv_heartbeat_enable(int cpu)
{
        if (!uv_cpu_hub_info(cpu)->scir.enabled) {
                struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

                uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
                setup_timer(timer, uv_heartbeat, cpu);
                timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
                add_timer_on(timer, cpu);
                uv_cpu_hub_info(cpu)->scir.enabled = 1;
        }

        /* check boot cpu */
        if (!uv_cpu_hub_info(0)->scir.enabled)
                uv_heartbeat_enable(0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
        if (uv_cpu_hub_info(cpu)->scir.enabled) {
                uv_cpu_hub_info(cpu)->scir.enabled = 0;
                del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
        }
        uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
                                       unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                uv_heartbeat_enable(cpu);
                break;
        case CPU_DOWN_PREPARE:
                uv_heartbeat_disable(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
        hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
        int cpu;

        if (is_uv_system())
                for_each_online_cpu(cpu)
                        uv_heartbeat_enable(cpu);
        return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 *      ZZZ hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
        /* CPU 0 initialization will be done via uv_system_init. */
        if (!uv_blade_info)
                return;

        uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

        if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
                set_x2apic_extra_bits(uv_hub_info->pnode);
}

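/*
 * Boot-time setup: size the blade tables from the node-present MMRs,
 * read the addressing geometry (m_val/n_val) from the local hub, and
 * fill in per-cpu hub info for every present cpu.
 */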
void __init uv_system_init(void)
{
        union uvh_si_addr_map_config_u m_n_config;
        union uvh_node_id_u node_id;
        unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
        int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
        int max_pnode = 0;
        unsigned long mmr_base, present;

        map_low_mmrs();

        m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
        mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
                        ~UV_MMR_ENABLE;
        printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

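        /*
         * Each UVH_NODE_PRESENT_TABLE word is a 64-bit bitmask of present
         * pnodes; the popcount over all words gives the number of blades.
         */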
        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                uv_possible_blades +=
                        hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
        printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_blade_info);

        get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

        bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
        uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_node_to_blade);
        memset(uv_node_to_blade, 255, bytes);

        bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
        uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_cpu_to_blade);
        memset(uv_cpu_to_blade, 255, bytes);

        blade = 0;
        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
                present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
                        uv_blade_info[blade].pnode = (i * 64 + j);
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
                        blade++;
                }
        }

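        /*
         * gnode_upper holds the node id bits above n_val, pre-shifted into
         * position for forming global physical addresses.
         */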
        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
        gnode_upper = (((unsigned long)node_id.s.node_id) &
                       ~((1 << n_val) - 1)) << m_val;

        uv_bios_init();
        uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
                            &sn_coherency_id, &sn_region_size);
        uv_rtc_init();

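        /*
         * Assign each present cpu a blade-local processor id (lcpu) and
         * cache the hub geometry in its per-cpu uv_hub_info.
         */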
        for_each_present_cpu(cpu) {
                nid = cpu_to_node(cpu);
                pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
                blade = boot_pnode_to_blade(pnode);
                lcpu = uv_blade_info[blade].nr_possible_cpus;
                uv_blade_info[blade].nr_possible_cpus++;

                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
                uv_cpu_hub_info(cpu)->n_val = n_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
                uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
                uv_cpu_hub_info(cpu)->pnode = pnode;
                uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
                uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
                uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
                max_pnode = max(pnode, max_pnode);

                printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
                        "lcpu %d, blade %d\n",
                        cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
                        lcpu, blade);
        }

        map_gru_high(max_pnode);
        map_mmr_high(max_pnode);
        map_config_high(max_pnode);
        map_mmioh_high(max_pnode);

        uv_cpu_init();
        uv_scir_register_cpu_notifier();
        proc_mkdir("sgi_uv", NULL);
}