powerpc/mm/hash: Reduce hash_mm_context size
authorAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Wed, 17 Apr 2019 13:03:51 +0000 (18:33 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Sun, 21 Apr 2019 13:12:39 +0000 (23:12 +1000)
Allocate subpage protect related variables only if we use the feature.
This helps in reducing the hash-related mm context struct by around 4K.

Before the patch
sizeof(struct hash_mm_context)  = 8288

After the patch
sizeof(struct hash_mm_context) = 4160

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/mm/subpage-prot.c

index 4481bedbb5beae476ec039b270b9f8b105a2bf90..eeb40091b46be4e4e64ea53c3ef3b2b55ad6b90d 100644 (file)
@@ -687,10 +687,8 @@ struct subpage_prot_table {
 #define SBP_L3_SHIFT           (SBP_L2_SHIFT + SBP_L2_BITS)
 
 extern void subpage_prot_free(struct mm_struct *mm);
-extern void subpage_prot_init_new_context(struct mm_struct *mm);
 #else
 static inline void subpage_prot_free(struct mm_struct *mm) {}
-static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
 #endif /* CONFIG_PPC_SUBPAGE_PROT */
 
 /*
@@ -720,7 +718,7 @@ struct hash_mm_context {
 #endif
 
 #ifdef CONFIG_PPC_SUBPAGE_PROT
-       struct subpage_prot_table spt;
+       struct subpage_prot_table *spt;
 #endif /* CONFIG_PPC_SUBPAGE_PROT */
 };
 
index e510e46b07ced57ac99bdfc2806c68fd23f35036..230a9dec7677484b1ba58afcc5b55ab8f7ba53fc 100644 (file)
@@ -206,7 +206,7 @@ static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx)
 #ifdef CONFIG_PPC_SUBPAGE_PROT
 static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
 {
-       return &ctx->hash_context->spt;
+       return ctx->hash_context->spt;
 }
 #endif
 
index 27239a07677354baff7f42f5483fe296b496f1ab..6a2d315495a3c68517ce9e1831ba6e21d3156d2f 100644 (file)
@@ -1150,6 +1150,9 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
        u32 spp = 0;
        u32 **sbpm, *sbpp;
 
+       if (!spt)
+               return 0;
+
        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000UL) {
index 6eef5a36b2e93da8e815dc616fd445321ac79a49..cb2b08635508b7cbff91a76504442e531a25a3f2 100644 (file)
@@ -63,7 +63,8 @@ static int hash__init_new_context(struct mm_struct *mm)
        if (index < 0)
                return index;
 
-       mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), GFP_KERNEL);
+       mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
+                                          GFP_KERNEL);
        if (!mm->context.hash_context) {
                ida_free(&mmu_context_ida, index);
                return -ENOMEM;
@@ -89,11 +90,21 @@ static int hash__init_new_context(struct mm_struct *mm)
        } else {
                /* This is fork. Copy hash_context details from current->mm */
                memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+               /* inherit subpage prot details if we have one. */
+               if (current->mm->context.hash_context->spt) {
+                       mm->context.hash_context->spt = kzalloc(sizeof(struct subpage_prot_table),
+                                                               GFP_KERNEL);
+                       if (!mm->context.hash_context->spt) {
+                               ida_free(&mmu_context_ida, index);
+                               kfree(mm->context.hash_context);
+                               return -ENOMEM;
+                       }
+               }
+#endif
 
        }
 
-       subpage_prot_init_new_context(mm);
-
        pkey_mm_init(mm);
        return index;
 }
index c72252542210d6f9f46ed45b64b5e0a1512b845f..c9dff4e1f295c05581d54ce323032e5b8a8494d7 100644 (file)
@@ -29,6 +29,9 @@ void subpage_prot_free(struct mm_struct *mm)
        unsigned long i, j, addr;
        u32 **p;
 
+       if (!spt)
+               return;
+
        for (i = 0; i < 4; ++i) {
                if (spt->low_prot[i]) {
                        free_page((unsigned long)spt->low_prot[i]);
@@ -48,13 +51,7 @@ void subpage_prot_free(struct mm_struct *mm)
                free_page((unsigned long)p);
        }
        spt->maxaddr = 0;
-}
-
-void subpage_prot_init_new_context(struct mm_struct *mm)
-{
-       struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
-
-       memset(spt, 0, sizeof(*spt));
+       kfree(spt);
 }
 
 static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
@@ -99,6 +96,9 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
        size_t nw;
        unsigned long next, limit;
 
+       if (!spt)
+               return;
+
        down_write(&mm->mmap_sem);
        limit = addr + len;
        if (limit > spt->maxaddr)
@@ -218,6 +218,20 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
                return -EFAULT;
 
        down_write(&mm->mmap_sem);
+
+       if (!spt) {
+               /*
+                * Allocate subpage prot table if not already done.
+                * Do this with mmap_sem held
+                */
+               spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
+               if (!spt) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               mm->context.hash_context->spt = spt;
+       }
+
        subpage_mark_vma_nohuge(mm, addr, len);
        for (limit = addr + len; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);