powerpc/mm: Add helpers for accessing hash translation related variables
author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Wed, 17 Apr 2019 13:03:48 +0000 (18:33 +0530)
committer Michael Ellerman <mpe@ellerman.id.au>
Sun, 21 Apr 2019 13:12:38 +0000 (23:12 +1000)
We want to switch to allocating the hash translation related variables at
runtime, and only when hash translation is enabled. Add helpers so that both
book3s and nohash code can be adapted to the upcoming change easily.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/include/asm/nohash/32/mmu-8xx.h
arch/powerpc/kernel/paca.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/slb.c
arch/powerpc/mm/slice.c
arch/powerpc/mm/subpage-prot.c

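Every call site below follows the same mechanical pattern: direct loads and
stores of mm->context members are replaced by accessor calls, so that a later
patch can relocate the storage by editing only the helper bodies. A minimal
before/after sketch of the pattern, taken from the should_hash_preload() hunk
below:

	/* before: callers reach into mm_context_t directly */
	if (unlikely(psize != mm->context.user_psize))
		return false;

	/* after: callers go through the accessor */
	if (unlikely(psize != mm_ctx_user_psize(&mm->context)))
		return false;
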
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index a28a28079edba94fd2c9a4773137b2f190f8c995..eb36fbfe4ef5d19b63facecff3f86e60fd8461f3 100644
@@ -657,8 +657,8 @@ extern void slb_set_size(u16 size);
 
 /* 4 bits per slice and we have one slice per 1TB */
 #define SLICE_ARRAY_SIZE       (H_PGTABLE_RANGE >> 41)
-#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.slb_addr_limit >> 41)
-
+#define LOW_SLICE_ARRAY_SZ     (BITS_PER_LONG / BITS_PER_BYTE)
+#define TASK_SLICE_ARRAY_SZ(x) ((x)->slb_addr_limit >> 41)
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_PPC_SUBPAGE_PROT
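
A quick sanity check on the shifts above, assuming the usual book3s64 slice
geometry (256MB slices below 4GB, 1TB slices above, 4 bits per slice):

	/*
	 * Low range:  4GB / 256MB = 16 slices, 16 * 4 bits = 64 bits
	 *             = BITS_PER_LONG / BITS_PER_BYTE = 8 bytes on 64-bit.
	 * High range: half a byte per 1TB (2^40) slice, so
	 *             bytes = limit >> (40 + 1), and a 64TB slb_addr_limit
	 *             gives (64ULL << 40) >> 41 = 32 bytes.
	 */
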
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index afe10dd11c6823aade537b4f643227e1456bb3a1..c9f3170906203b6f240218903b47414af2e1c9d2 100644
@@ -139,7 +139,7 @@ typedef struct {
        struct npu_context *npu_context;
 
         /* SLB page size encodings*/
-       unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
+       unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
        unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
        unsigned long slb_addr_limit;
 # ifdef CONFIG_PPC_64K_PAGES
@@ -174,6 +174,67 @@ typedef struct {
 #endif
 } mm_context_t;
 
+static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
+{
+       return ctx->user_psize;
+}
+
+static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
+{
+       ctx->user_psize = user_psize;
+}
+
+static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
+{
+       return ctx->low_slices_psize;
+}
+
+static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
+{
+       return ctx->high_slices_psize;
+}
+
+static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
+{
+       return ctx->slb_addr_limit;
+}
+
+static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
+{
+       ctx->slb_addr_limit = limit;
+}
+
+#ifdef CONFIG_PPC_64K_PAGES
+static inline struct slice_mask *mm_ctx_slice_mask_64k(mm_context_t *ctx)
+{
+       return &ctx->mask_64k;
+}
+#endif
+
+static inline struct slice_mask *mm_ctx_slice_mask_4k(mm_context_t *ctx)
+{
+       return &ctx->mask_4k;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline struct slice_mask *mm_ctx_slice_mask_16m(mm_context_t *ctx)
+{
+       return &ctx->mask_16m;
+}
+
+static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx)
+{
+       return &ctx->mask_16g;
+}
+#endif
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
+{
+       return &ctx->spt;
+}
+#endif
+
 /*
  * The current system page and segment sizes
  */
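
The payoff of funnelling every access through these one-line helpers comes in
the follow-up: if the hash-specific fields move into a structure that is only
allocated when hash translation is active, only the helper bodies change. A
hypothetical sketch (the hash_context field name is illustrative, not part of
this patch):

	static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
	{
		/* indirect through a runtime-allocated hash context */
		return ctx->hash_context->user_psize;
	}
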
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index f620adef54fcd2f21832634167c61f11b11cf711..c503e2f05e6115fb8ecd504e4ca568e08ff49962 100644
 #ifdef CONFIG_PPC_MM_SLICES
 #include <asm/nohash/32/slice.h>
 #define SLICE_ARRAY_SIZE       (1 << (32 - SLICE_LOW_SHIFT - 1))
+#define LOW_SLICE_ARRAY_SZ     SLICE_ARRAY_SIZE
 #endif
 
 #ifndef __ASSEMBLY__
@@ -207,6 +208,55 @@ typedef struct {
        void *pte_frag;
 } mm_context_t;
 
+#ifdef CONFIG_PPC_MM_SLICES
+static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
+{
+       return ctx->user_psize;
+}
+
+static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
+{
+       ctx->user_psize = user_psize;
+}
+
+static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
+{
+       return ctx->low_slices_psize;
+}
+
+static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
+{
+       return ctx->high_slices_psize;
+}
+
+static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
+{
+       return ctx->slb_addr_limit;
+}
+
+static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
+{
+       ctx->slb_addr_limit = limit;
+}
+
+static inline struct slice_mask *mm_ctx_slice_mask_base(mm_context_t *ctx)
+{
+       return &ctx->mask_base_psize;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline struct slice_mask *mm_ctx_slice_mask_512k(mm_context_t *ctx)
+{
+       return &ctx->mask_512k;
+}
+
+static inline struct slice_mask *mm_ctx_slice_mask_8m(mm_context_t *ctx)
+{
+       return &ctx->mask_8m;
+}
+#endif
+#endif /* CONFIG_PPC_MM_SLICES */
+
 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
 #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index e7382abee86849f136a6337eedf58a417dba6ea9..9cc91d03ab62170a344812b3f557c2e16f7f1171 100644
@@ -267,12 +267,12 @@ void copy_mm_to_paca(struct mm_struct *mm)
 
        get_paca()->mm_ctx_id = context->id;
 #ifdef CONFIG_PPC_MM_SLICES
-       VM_BUG_ON(!mm->context.slb_addr_limit);
-       get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
-       memcpy(&get_paca()->mm_ctx_low_slices_psize,
-              &context->low_slices_psize, sizeof(context->low_slices_psize));
-       memcpy(&get_paca()->mm_ctx_high_slices_psize,
-              &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
+       VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
+       get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
+       memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
+              LOW_SLICE_ARRAY_SZ);
+       memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
+              TASK_SLICE_ARRAY_SZ(context));
 #else /* CONFIG_PPC_MM_SLICES */
        get_paca()->mm_ctx_user_psize = context->user_psize;
        get_paca()->mm_ctx_sllp = context->sllp;
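
Two details are worth noting in the copy above: TASK_SLICE_ARRAY_SZ() now
takes the context rather than the mm (matching the macro change in
mmu-hash.h), and the two memcpy() lengths differ in kind: the low-slice array
is a fixed size, while the high-slice copy is bounded by the live address
limit rather than the worst-case SLICE_ARRAY_SIZE. With the default 64TB user
window, for example:

	/* low slices:  LOW_SLICE_ARRAY_SZ = BITS_PER_LONG / BITS_PER_BYTE = 8 */
	/* high slices: TASK_SLICE_ARRAY_SZ(context)
	 *              = slb_addr_limit >> 41
	 *              = (64ULL << 40) >> 41 = 32 bytes
	 */
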
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c4c9610ce6e3c261496c7ed80ca9fe553dbde37a..fee0270618ac06fe17a0c4536c31388f8df7ac93 100644
@@ -1142,7 +1142,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
  */
 static int subpage_protection(struct mm_struct *mm, unsigned long ea)
 {
-       struct subpage_prot_table *spt = &mm->context.spt;
+       struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
        u32 spp = 0;
        u32 **sbpm, *sbpp;
 
@@ -1465,7 +1465,7 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
        int psize = get_slice_psize(mm, ea);
 
        /* We only prefault standard pages for now */
-       if (unlikely(psize != mm->context.user_psize))
+       if (unlikely(psize != mm_ctx_user_psize(&mm->context)))
                return false;
 
        /*
@@ -1544,7 +1544,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 
        /* Hash it in */
 #ifdef CONFIG_PPC_64K_PAGES
-       if (mm->context.user_psize == MMU_PAGE_64K)
+       if (mm_ctx_user_psize(&mm->context) == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap,
                                     update_flags, ssize);
        else
@@ -1557,8 +1557,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize,
-                                  mm->context.user_psize,
-                                  mm->context.user_psize,
+                                  mm_ctx_user_psize(&mm->context),
+                                  mm_ctx_user_psize(&mm->context),
                                   pte_val(*ptep));
 out_exit:
        local_irq_restore(flags);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5986df48359b0a9e6cd08b3926ae0b5b72a69d89..78c0c0a0e3555b930b689914ec8d44773d3996e6 100644
@@ -739,7 +739,7 @@ static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
         * consider this as bad access if we take a SLB miss
         * on an address above addr limit.
         */
-       if (ea >= mm->context.slb_addr_limit)
+       if (ea >= mm_ctx_slb_addr_limit(&mm->context))
                return -EFAULT;
 
        context = get_user_context(&mm->context, ea);
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index aec91dbcdc0b47bef604eeb802cd40e999a48f7c..35b2780823916aa4963e4c21b5ac3fc1552290a9 100644
@@ -101,7 +101,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 {
        struct vm_area_struct *vma;
 
-       if ((mm->context.slb_addr_limit - len) < addr)
+       if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -155,15 +155,15 @@ static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
 {
 #ifdef CONFIG_PPC_64K_PAGES
        if (psize == MMU_PAGE_64K)
-               return &mm->context.mask_64k;
+               return mm_ctx_slice_mask_64k(&mm->context);
 #endif
        if (psize == MMU_PAGE_4K)
-               return &mm->context.mask_4k;
+               return mm_ctx_slice_mask_4k(&mm->context);
 #ifdef CONFIG_HUGETLB_PAGE
        if (psize == MMU_PAGE_16M)
-               return &mm->context.mask_16m;
+               return mm_ctx_slice_mask_16m(&mm->context);
        if (psize == MMU_PAGE_16G)
-               return &mm->context.mask_16g;
+               return mm_ctx_slice_mask_16g(&mm->context);
 #endif
        BUG();
 }
@@ -253,7 +253,7 @@ static void slice_convert(struct mm_struct *mm,
         */
        spin_lock_irqsave(&slice_convert_lock, flags);
 
-       lpsizes = mm->context.low_slices_psize;
+       lpsizes = mm_ctx_low_slices(&mm->context);
        for (i = 0; i < SLICE_NUM_LOW; i++) {
                if (!(mask->low_slices & (1u << i)))
                        continue;
@@ -272,8 +272,8 @@ static void slice_convert(struct mm_struct *mm,
                                (((unsigned long)psize) << (mask_index * 4));
        }
 
-       hpsizes = mm->context.high_slices_psize;
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+       hpsizes = mm_ctx_high_slices(&mm->context);
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
                if (!test_bit(i, mask->high_slices))
                        continue;
 
@@ -292,8 +292,8 @@ static void slice_convert(struct mm_struct *mm,
        }
 
        slice_dbg(" lsps=%lx, hsps=%lx\n",
-                 (unsigned long)mm->context.low_slices_psize,
-                 (unsigned long)mm->context.high_slices_psize);
+                 (unsigned long)mm_ctx_low_slices(&mm->context),
+                 (unsigned long)mm_ctx_high_slices(&mm->context));
 
        spin_unlock_irqrestore(&slice_convert_lock, flags);
 
@@ -393,7 +393,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
         * DEFAULT_MAP_WINDOW we should apply this.
         */
        if (high_limit > DEFAULT_MAP_WINDOW)
-               addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
+               addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
 
        while (addr > min_addr) {
                info.high_limit = addr;
@@ -505,20 +505,20 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                        return -ENOMEM;
        }
 
-       if (high_limit > mm->context.slb_addr_limit) {
+       if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
                /*
                 * Increasing the slb_addr_limit does not require
                 * slice mask cache to be recalculated because it should
                 * be already initialised beyond the old address limit.
                 */
-               mm->context.slb_addr_limit = high_limit;
+               mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
 
                on_each_cpu(slice_flush_segments, mm, 1);
        }
 
        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
-       BUG_ON(mm->context.slb_addr_limit == 0);
+       BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
        VM_BUG_ON(radix_enabled());
 
        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
@@ -696,7 +696,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long flags)
 {
        return slice_get_unmapped_area(addr, len, flags,
-                                      current->mm->context.user_psize, 0);
+                                      mm_ctx_user_psize(&current->mm->context), 0);
 }
 
 unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -706,7 +706,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long flags)
 {
        return slice_get_unmapped_area(addr0, len, flags,
-                                      current->mm->context.user_psize, 1);
+                                      mm_ctx_user_psize(&current->mm->context), 1);
 }
 
 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
@@ -717,10 +717,10 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
        VM_BUG_ON(radix_enabled());
 
        if (slice_addr_is_low(addr)) {
-               psizes = mm->context.low_slices_psize;
+               psizes = mm_ctx_low_slices(&mm->context);
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
-               psizes = mm->context.high_slices_psize;
+               psizes = mm_ctx_high_slices(&mm->context);
                index = GET_HIGH_SLICE_INDEX(addr);
        }
        mask_index = index & 0x1;
@@ -742,20 +742,19 @@ void slice_init_new_context_exec(struct mm_struct *mm)
         * duplicated.
         */
 #ifdef CONFIG_PPC64
-       mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
+       mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW_USER64);
 #else
        mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
 #endif
-
-       mm->context.user_psize = psize;
+       mm_ctx_set_user_psize(&mm->context, psize);
 
        /*
         * Set all slice psizes to the default.
         */
-       lpsizes = mm->context.low_slices_psize;
+       lpsizes = mm_ctx_low_slices(&mm->context);
        memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
 
-       hpsizes = mm->context.high_slices_psize;
+       hpsizes = mm_ctx_high_slices(&mm->context);
        memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
 
        /*
@@ -777,7 +776,7 @@ void slice_setup_new_exec(void)
        if (!is_32bit_task())
                return;
 
-       mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
+       mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
 }
 #endif
 
@@ -816,7 +815,7 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
 {
        const struct slice_mask *maskp;
-       unsigned int psize = mm->context.user_psize;
+       unsigned int psize = mm_ctx_user_psize(&mm->context);
 
        VM_BUG_ON(radix_enabled());
 
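Both the memset()s in slice_init_new_context_exec() and the mask_index
arithmetic in get_slice_psize() rely on the same packing: two 4-bit page-size
entries per byte. A worked example (the psize value 4 is illustrative):

	/* init: (psize << 4) | psize fills both nibbles of every byte,
	 * so N slices need only N >> 1 bytes */
	memset(hpsizes, (4 << 4) | 4, SLICE_NUM_HIGH >> 1);

	/* lookup: slice 'index' lives in byte index >> 1, nibble index & 1 */
	mask_index = index & 0x1;
	psize = (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
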
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 5e4178790deef77d7edebd8601113c4c96829e9c..c72252542210d6f9f46ed45b64b5e0a1512b845f 100644
@@ -25,7 +25,7 @@
  */
 void subpage_prot_free(struct mm_struct *mm)
 {
-       struct subpage_prot_table *spt = &mm->context.spt;
+       struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
        unsigned long i, j, addr;
        u32 **p;
 
@@ -52,7 +52,7 @@ void subpage_prot_free(struct mm_struct *mm)
 
 void subpage_prot_init_new_context(struct mm_struct *mm)
 {
-       struct subpage_prot_table *spt = &mm->context.spt;
+       struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
 
        memset(spt, 0, sizeof(*spt));
 }
@@ -93,7 +93,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
 static void subpage_prot_clear(unsigned long addr, unsigned long len)
 {
        struct mm_struct *mm = current->mm;
-       struct subpage_prot_table *spt = &mm->context.spt;
+       struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
        u32 **spm, *spp;
        unsigned long i;
        size_t nw;
@@ -189,7 +189,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
                unsigned long, len, u32 __user *, map)
 {
        struct mm_struct *mm = current->mm;
-       struct subpage_prot_table *spt = &mm->context.spt;
+       struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
        u32 **spm, *spp;
        unsigned long i;
        size_t nw;