powerpc/mm: Move pte_fragment_alloc() to a common location
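For context, the code being moved carves each backing page into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes: fragments are handed out one at a time from a per-mm cache, every outstanding fragment holds a reference on the page via pt_frag_refcount, and the page is freed only when the last fragment is released. The user-space sketch below illustrates that scheme in a simplified, single-threaded form; struct frag_page, frag_alloc(), frag_free() and the constant values are illustrative stand-ins rather than kernel APIs, and the per-mm page_table_lock handling of the real code is omitted.

/*
 * Simplified sketch of the PTE fragment scheme (not kernel code): one
 * "page" is split into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes,
 * and the backing page is freed once the last fragment is returned.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     65536UL                     /* stand-in page size */
#define PTE_FRAG_SIZE 4096UL                      /* stand-in fragment size */
#define PTE_FRAG_NR   (PAGE_SIZE / PTE_FRAG_SIZE)

struct frag_page {
	unsigned char *base;                      /* start of the backing page */
	unsigned char *next_frag;                 /* next unused fragment, NULL once exhausted */
	atomic_int refcount;                      /* one reference per outstanding fragment */
};

/* Hand out the next unused fragment, analogous to get_pte_from_cache(). */
static void *frag_alloc(struct frag_page *fp)
{
	unsigned char *ret = fp->next_frag;

	if (!ret)
		return NULL;
	fp->next_frag = ret + PTE_FRAG_SIZE;
	/* Once the last fragment is taken, mark the cache empty. */
	if ((unsigned long)(fp->next_frag - fp->base) >= PAGE_SIZE)
		fp->next_frag = NULL;
	return ret;
}

/* Drop one fragment reference; free the page when the last one goes away. */
static void frag_free(struct frag_page *fp)
{
	if (atomic_fetch_sub(&fp->refcount, 1) == 1)
		free(fp->base);
}

int main(void)
{
	struct frag_page fp;
	unsigned long i;

	fp.base = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!fp.base)
		return 1;
	fp.next_frag = fp.base;
	atomic_init(&fp.refcount, PTE_FRAG_NR);

	for (i = 0; i < PTE_FRAG_NR; i++)
		printf("fragment %lu at %p\n", i, frag_alloc(&fp));
	for (i = 0; i < PTE_FRAG_NR; i++)
		frag_free(&fp);                   /* last free releases the whole page */
	return 0;
}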
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 9f93c9f985c5f1ebed09ac552c5b3fdc6b937abf..0c0fd173208a39a0052e084276c287c076571307 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -322,91 +322,6 @@ void pmd_fragment_free(unsigned long *pmd)
        }
 }
 
-static pte_t *get_pte_from_cache(struct mm_struct *mm)
-{
-       void *pte_frag, *ret;
-
-       spin_lock(&mm->page_table_lock);
-       ret = mm->context.pte_frag;
-       if (ret) {
-               pte_frag = ret + PTE_FRAG_SIZE;
-               /*
-                * If we have taken up all the fragments mark PTE page NULL
-                */
-               if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
-                       pte_frag = NULL;
-               mm->context.pte_frag = pte_frag;
-       }
-       spin_unlock(&mm->page_table_lock);
-       return (pte_t *)ret;
-}
-
-static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
-{
-       void *ret = NULL;
-       struct page *page;
-
-       if (!kernel) {
-               page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-               if (!page)
-                       return NULL;
-               if (!pgtable_page_ctor(page)) {
-                       __free_page(page);
-                       return NULL;
-               }
-       } else {
-               page = alloc_page(PGALLOC_GFP);
-               if (!page)
-                       return NULL;
-       }
-
-       atomic_set(&page->pt_frag_refcount, 1);
-
-       ret = page_address(page);
-       /*
-        * If we support only one fragment, just return the
-        * allocated page.
-        */
-       if (PTE_FRAG_NR == 1)
-               return ret;
-       spin_lock(&mm->page_table_lock);
-       /*
-        * If we find pgtable_page set, we return
-        * the allocated page with a single fragment
-        * count.
-        */
-       if (likely(!mm->context.pte_frag)) {
-               atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
-               mm->context.pte_frag = ret + PTE_FRAG_SIZE;
-       }
-       spin_unlock(&mm->page_table_lock);
-
-       return (pte_t *)ret;
-}
-
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
-{
-       pte_t *pte;
-
-       pte = get_pte_from_cache(mm);
-       if (pte)
-               return pte;
-
-       return __alloc_for_ptecache(mm, kernel);
-}
-
-void pte_fragment_free(unsigned long *table, int kernel)
-{
-       struct page *page = virt_to_page(table);
-
-       BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-       if (atomic_dec_and_test(&page->pt_frag_refcount)) {
-               if (!kernel)
-                       pgtable_page_dtor(page);
-               __free_page(page);
-       }
-}
-
 static inline void pgtable_free(void *table, int index)
 {
        switch (index) {