powerpc/mm/hash: Rename KERNEL_REGION_ID to LINEAR_MAP_REGION_ID
author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Wed, 17 Apr 2019 12:59:19 +0000 (18:29 +0530)
committer Michael Ellerman <mpe@ellerman.id.au>
Sun, 21 Apr 2019 13:12:40 +0000 (23:12 +1000)
The region actually points to the linear map. Rename the #define to
clarify that.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/mm/copro_fault.c
arch/powerpc/mm/slb.c
arch/powerpc/platforms/cell/spu_base.c

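For context, the sketch below is an illustrative, standalone restatement of the address classification that the renamed constant takes part in. It mirrors the get_region_id() logic visible in the book3s/64/hash.h hunk that follows; the concrete H_KERN_VIRT_START value and the main() harness are assumptions made for the demo, not taken from the patch or the kernel's configured layout.

/*
 * Minimal sketch (not kernel code): classify a 64-bit effective address
 * into the two region IDs this rename touches. The top nibble of the
 * address selects the quadrant; within the 0xc quadrant, everything below
 * the start of the non-linear mappings is the linear map of RAM, which is
 * why KERNEL_REGION_ID becomes LINEAR_MAP_REGION_ID.
 */
#include <stdio.h>
#include <stdint.h>

#define USER_REGION_ID        0
#define LINEAR_MAP_REGION_ID  1   /* was KERNEL_REGION_ID before this rename */

/* Assumed example boundary for the demo: start of the non-linear kernel
 * mappings (vmalloc/IO/vmemmap). Not the kernel's actual configured value. */
#define H_KERN_VIRT_START     0xc008000000000000ULL

static int get_region_id(uint64_t ea)
{
	int id = ea >> 60;                    /* top nibble selects the quadrant */

	if (id == 0)
		return USER_REGION_ID;        /* user space lives in quadrant 0x0 */
	if (ea < H_KERN_VIRT_START)
		return LINEAR_MAP_REGION_ID;  /* 0xc... below the non-linear area */
	return -1;                            /* vmalloc/IO/vmemmap: not modelled here */
}

int main(void)
{
	printf("%d\n", get_region_id(0x0000000010000000ULL)); /* 0: user */
	printf("%d\n", get_region_id(0xc000000001234000ULL)); /* 1: linear map */
	return 0;
}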
index 7faa3d7214c04a74c3331273cd51ec27524cabf9..1d1183048cfd737d8ed83607a83606af2568738c 100644 (file)
@@ -89,7 +89,7 @@
  * Region IDs
  */
 #define USER_REGION_ID         0
-#define KERNEL_REGION_ID       1
+#define LINEAR_MAP_REGION_ID   1
 #define VMALLOC_REGION_ID      NON_LINEAR_REGION_ID(H_VMALLOC_START)
 #define IO_REGION_ID           NON_LINEAR_REGION_ID(H_KERN_IO_START)
 #define VMEMMAP_REGION_ID      NON_LINEAR_REGION_ID(H_VMEMMAP_START)
@@ -120,7 +120,7 @@ static inline int get_region_id(unsigned long ea)
                return USER_REGION_ID;
 
        if (ea < H_KERN_VIRT_START)
-               return KERNEL_REGION_ID;
+               return LINEAR_MAP_REGION_ID;
 
        VM_BUG_ON(id != 0xc);
        BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
index 9a9adbeef0706165f8bf53e0cb95ff588e6f0c15..1e4705516a54f8474c517e358944b3a1bb738556 100644 (file)
@@ -817,7 +817,7 @@ static inline unsigned long get_kernel_context(unsigned long ea)
         * Depending on Kernel config, kernel region can have one context
         * or more.
         */
-       if (region_id == KERNEL_REGION_ID) {
+       if (region_id == LINEAR_MAP_REGION_ID) {
                /*
                 * We already verified ea to be not beyond the addr limit.
                 */
index 9b0321061bc87bc33f09c77d145e745db350fe6d..f137286740cb9c19d210f92dcbab1404bfb0d119 100644 (file)
@@ -129,8 +129,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                vsidkey = SLB_VSID_KERNEL;
                break;
-       case KERNEL_REGION_ID:
-               pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+       case LINEAR_MAP_REGION_ID:
+               pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
                psize = mmu_linear_psize;
                ssize = mmu_kernel_ssize;
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
index 721cb09c9044bb9a2c42fe5d052315de821a5763..89e4531de64b48a66f43df5e307342de4c12e254 100644 (file)
@@ -691,7 +691,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
        unsigned long flags;
        int ssize;
 
-       if (id == KERNEL_REGION_ID) {
+       if (id == LINEAR_MAP_REGION_ID) {
 
                /* We only support upto MAX_PHYSMEM_BITS */
                if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
@@ -790,7 +790,7 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
         * first class kernel code. But for performance it's probably nicer
         * if they go via fast_exception_return too.
         */
-       if (id >= KERNEL_REGION_ID) {
+       if (id >= LINEAR_MAP_REGION_ID) {
                long err;
 #ifdef CONFIG_DEBUG_VM
                /* Catch recursive kernel SLB faults. */
index 4770cce1bfe206b275e36330676f72b31f1b0967..6646f152d57bcb83d7172a8f82b6e99decf1366c 100644 (file)
@@ -224,7 +224,7 @@ static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
        unsigned long ea = (unsigned long)addr;
        u64 llp;
 
-       if (get_region_id(ea) == KERNEL_REGION_ID)
+       if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
        else
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;