powerpc: Shield code specific to 64-bit server processors
author		Benjamin Herrenschmidt <benh@kernel.crashing.org>
		Tue, 2 Jun 2009 21:17:45 +0000 (21:17 +0000)
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>
		Tue, 9 Jun 2009 06:47:38 +0000 (16:47 +1000)
This is a random collection of added ifdefs around portions of
code that only make sense on server processors, using either
CONFIG_PPC_STD_MMU_64 or CONFIG_PPC_BOOK3S as appropriate.

This is meant to make the future merging of Book3E 64-bit support
easier.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
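
As an illustration of the pattern applied throughout this patch (a sketch that
simply mirrors the pgtable-ppc64.h hunk below, not an additional change),
hash-MMU-only work such as hpte_need_flush() is now compiled out on 64-bit
cores that do not use the server hash MMU:

#ifdef CONFIG_PPC_STD_MMU_64
	/* Hash MMU only: queue a flush of the hashed PTE for this update */
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif
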
arch/powerpc/include/asm/lppaca.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/pgtable-ppc64.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/mm/Makefile
arch/powerpc/mm/init_64.c

diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index d2a65e8ca6ae08df08c17626dfa4acaf2d6103d9..f78f65c38f054d8d7f56fafee268c33d793a62c4 100644
 #define _ASM_POWERPC_LPPACA_H
 #ifdef __KERNEL__
 
+/* These definitions relate to hypervisors that only exist when using
+ * a server type processor
+ */
+#ifdef CONFIG_PPC_BOOK3S
+
 //=============================================================================
 //
 //     This control block contains the data that is shared between the
@@ -158,5 +163,6 @@ struct slb_shadow {
 
 extern struct slb_shadow slb_shadow[];
 
+#endif /* CONFIG_PPC_BOOK3S */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 325b7208a146d6ee3c4c050309d2a603500874ce..fb57ded592f9f1b8ccdb6affbd245e7425159309 100644
@@ -74,10 +74,10 @@ extern void early_init_mmu_secondary(void);
 #endif /* !__ASSEMBLY__ */
 
 
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
 #  include <asm/mmu-hash64.h>
-#elif defined(CONFIG_PPC_STD_MMU)
+#elif defined(CONFIG_PPC_STD_MMU_32)
 /* 32-bit classic hash table MMU */
 #  include <asm/mmu-hash32.h>
 #elif defined(CONFIG_40x)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c40db05f21e000a3682bf5a828ede9612cb710ec..8cd083c6150384eeca4c28a38d5a49e643efab1a 100644
 #error TASK_SIZE_USER64 exceeds pagetable range
 #endif
 
+#ifdef CONFIG_PPC_STD_MMU_64
 #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
@@ -199,8 +201,11 @@ static inline unsigned long pte_update(struct mm_struct *mm,
        if (!huge)
                assert_pte_locked(mm, addr);
 
+#ifdef CONFIG_PPC_STD_MMU_64
        if (old & _PAGE_HASHPTE)
                hpte_need_flush(mm, addr, ptep, old, huge);
+#endif
+
        return old;
 }
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7d46e5d5b20742e8b9e7c55b125309156cc43e01..8564a412e7a66f28b0403007f7a6615d23167816 100644
@@ -117,6 +117,7 @@ notrace void raw_local_irq_restore(unsigned long en)
        if (!en)
                return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
@@ -134,6 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en)
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
        /*
         * if (get_paca()->hard_enabled) return;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index dd6c7a3bf72cb465baf673a5a0a373d8b1829d15..461c91625a80674f50b5615cea8809b602a37543 100644
@@ -420,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
         * so flushing the hash table is the only sane way to make sure
         * that no hash entries are covering that removed bridge area
         * while still allowing other busses overlapping those pages
+        *
+        * Note: If we ever support P2P hotplug on Book3E, we'll have
+        * to do an appropriate TLB flush here too
         */
        if (bus->self) {
                struct resource *res = bus->resource[0];
@@ -427,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
                pr_debug("IO unmapping for PCI-PCI bridge %s\n",
                         pci_name(bus->self));
 
+#ifdef CONFIG_PPC_STD_MMU_64
                __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
                                         res->end + _IO_BASE + 1);
+#endif
                return 0;
        }
 
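
The Book3E note in the comment above refers to follow-up work that is not part
of this patch. A hypothetical sketch of what such a path could look like,
assuming the generic flush_tlb_kernel_range() helper would be suitable there:

#ifdef CONFIG_PPC_STD_MMU_64
		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
#else
		/* Hypothetical Book3E handling (not in this patch): flush any
		 * stale TLB entries covering the unmapped window.
		 */
		flush_tlb_kernel_range(res->start + _IO_BASE,
				       res->end + _IO_BASE + 1);
#endif
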
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b44a33f03c230a07381f876f25da1fdb7871c6a..3e7135bbe40f648c9a76c82b57fbd72a4e2aba95 100644
@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
        if (cpu_has_feature(CPU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ce01ff2474da8a68785e72f9246f9de9de86ce16..d4405b95bfaa708b4511e259d6a5e6b1a73582d9 100644
@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node)
                      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
 }
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
 static void __init check_cpu_slb_size(unsigned long node)
 {
        u32 *slb_size_ptr;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c410c606955da7770acacbaa1737a163db4c749d..42221055f0c445e3af966fabd543f014e7e5394f 100644
@@ -417,9 +417,11 @@ void __init setup_system(void)
        if (ppc64_caches.iline_size != 0x80)
                printk("ppc64_caches.icache_line_size = 0x%x\n",
                       ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
        if (htab_address)
                printk("htab_address                  = 0x%p\n", htab_address);
        printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
+#endif /* CONFIG_PPC_STD_MMU_64 */
        if (PHYSICAL_START > 0)
                printk("physical_start                = 0x%lx\n",
                       PHYSICAL_START);
@@ -511,8 +513,9 @@ void __init setup_arch(char **cmdline_p)
        irqstack_early_init();
        emergency_stack_init();
 
+#ifdef CONFIG_PPC_STD_MMU_64
        stabs_alloc();
-
+#endif
        /* set up the bootmem stuff with available memory */
        do_init_bootmem();
        sparse_init();
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index b746f4ca4209aaa7c385272b098804bbbd7dcb0d..c4bcf072cb3ca5de9ccbec1098eeb5ca067e713b 100644
@@ -11,10 +11,11 @@ obj-y                               := fault.o mem.o pgtable.o gup.o \
                                   pgtable_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC_MMU_NOHASH)   += mmu_context_nohash.o tlb_nohash.o \
                                   tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE)      := hash_native_64.o
-obj-$(CONFIG_PPC64)            += hash_utils_64.o \
+obj-$(CONFIG_PPC64)            += mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE)    := hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64)   += hash_utils_64.o \
                                   slb_low.o slb.o stab.o \
-                                  mmap_64.o $(hash-y)
+                                  mmap_64.o $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)   += ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)      += hash_low_$(CONFIG_WORD_SIZE).o \
                                   tlb_hash$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3e6a6543f53a9fa193d49081d1d6fcb3b8353776..68a821add28df21c09c2716cc2cb8ef5c77dc796 100644
@@ -66,6 +66,7 @@
 
 #include "mmu_decl.h"
 
+#ifdef CONFIG_PPC_STD_MMU_64
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -73,6 +74,7 @@
 #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
 phys_addr_t kernstart_addr;