sparc64: Use 'ILOG2_4MB' instead of constant '22'.
author	David S. Miller <davem@davemloft.net>
Sun, 4 May 2014 05:52:50 +0000 (22:52 -0700)
committer	David S. Miller <davem@davemloft.net>
Sun, 4 May 2014 05:52:50 +0000 (22:52 -0700)
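
Replace the open-coded shift count '22', which is log2 of the 4MB granule
used for the sparc64 valid-address bitmap and for the locked TLB mappings
of the main kernel image, with the symbolic constant ILOG2_4MB so the
intent of each shift is explicit.  No functional change is intended.
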
Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/head_64.S
arch/sparc/kernel/ktlb.S
arch/sparc/mm/init_64.c
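
For context, a minimal sketch of what the new symbol boils down to and of the
two idioms this patch converts.  The #define mirrors the value implied by the
constant being replaced (4MB == 1UL << 22); its exact home
(arch/sparc/include/asm/page_64.h) is assumed rather than shown by this diff,
and the helper names below are illustrative only, not kernel functions.

	/* Assumed definition, equal to the constant being replaced: log2(4MB) == 22. */
	#define ILOG2_4MB	22

	/* Idiom 1: index of the 4MB chunk containing a physical address,
	 * as used for the sparc64_valid_addr_bitmap lookups.
	 */
	static inline unsigned long addr_to_4mb_index(unsigned long paddr)
	{
		return paddr >> ILOG2_4MB;
	}

	/* Idiom 2: round a physical address down to a 4MB boundary,
	 * as used when deriving kern_base and the locked-TLB phys_page.
	 */
	static inline unsigned long align_down_4mb(unsigned long phys)
	{
		return (phys >> ILOG2_4MB) << ILOG2_4MB;
	}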

diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index ff97960b624287ff95c91f09ec82a22e095b2d47..0ad0a1285bd0d7f6d1ee2a3a7aa1794a134da2ef 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -918,7 +918,7 @@ static inline bool kern_addr_valid(unsigned long addr)
 
        if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
                return false;
-       return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
+       return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
 }
 
 extern int page_in_phys_avail(unsigned long paddr);
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 26b706a1867dc6b9976b52e1e61ef8e54ce0df91..452f04fe8da698bb8b4620abd40ac6d4fbcd8393 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -282,8 +282,8 @@ sun4v_chip_type:
        stx     %l2, [%l4 + 0x0]
        ldx     [%sp + 2047 + 128 + 0x50], %l3  ! physaddr low
        /* 4MB align */
-       srlx    %l3, 22, %l3
-       sllx    %l3, 22, %l3
+       srlx    %l3, ILOG2_4MB, %l3
+       sllx    %l3, ILOG2_4MB, %l3
        stx     %l3, [%l4 + 0x8]
 
        /* Leave service as-is, "call-method" */
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 542e96ac4d39948c165bd63eb9a43e25c0ba72e4..605d49204580585356a7fda6dede8657641fb7e1 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -277,7 +277,7 @@ kvmap_dtlb_load:
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 kvmap_vmemmap:
        sub             %g4, %g5, %g5
-       srlx            %g5, 22, %g5
+       srlx            %g5, ILOG2_4MB, %g5
        sethi           %hi(vmemmap_table), %g1
        sllx            %g5, 3, %g5
        or              %g1, %lo(vmemmap_table), %g1
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index eafbc65c9c47f63772162d384ef55fd549d1e149..ed3c969a5f4c897e802b02dcfce583acabcf3a14 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -588,7 +588,7 @@ static void __init remap_kernel(void)
        int i, tlb_ent = sparc64_highest_locked_tlbent();
 
        tte_vaddr = (unsigned long) KERNBASE;
-       phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        tte_data = kern_large_tte(phys_page);
 
        kern_locked_tte_data = tte_data;
@@ -1881,7 +1881,7 @@ void __init paging_init(void)
 
        BUILD_BUG_ON(NR_CPUS > 4096);
 
-       kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
        /* Invalidate both kernel TSBs.  */
@@ -1937,7 +1937,7 @@ void __init paging_init(void)
        shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
        real_end = (unsigned long)_end;
-       num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+       num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
        printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
               num_kernel_image_mappings);
 
@@ -2094,7 +2094,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
 
                                if (new_start <= old_start &&
                                    new_end >= (old_start + PAGE_SIZE)) {
-                                       set_bit(old_start >> 22, bitmap);
+                                       set_bit(old_start >> ILOG2_4MB, bitmap);
                                        goto do_next_page;
                                }
                        }
@@ -2143,7 +2143,7 @@ void __init mem_init(void)
        addr = PAGE_OFFSET + kern_base;
        last = PAGE_ALIGN(kern_size) + addr;
        while (addr < last) {
-               set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+               set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
                addr += PAGE_SIZE;
        }
 
@@ -2267,7 +2267,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                void *block;
 
                if (!(*vmem_pp & _PAGE_VALID)) {
-                       block = vmemmap_alloc_block(1UL << 22, node);
+                       block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
                        if (!block)
                                return -ENOMEM;