sparc32: Remove Cypress CPU support.
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index cbef74e793b8df9c9f3b7dc474f7a64feb082b3d..4875fcd8fd7a7f87f7d3a2e5b4ae7bb818890f5f 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
 #include <asm/turbosparc.h>
 #include <asm/leon.h>
 
-#include <asm/btfixup.h>
-
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
 int vac_line_size;
 
+struct ctx_list *ctx_list_pool;
+struct ctx_list ctx_free;
+struct ctx_list ctx_used;
+
 extern struct resource sparc_iomap;
 
 extern unsigned long last_valid_pfn;
 
-extern unsigned long page_kernel;
-
 static pgd_t *srmmu_swapper_pg_dir;
 
+const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+
 #ifdef CONFIG_SMP
+const struct sparc32_cachetlb_ops *local_ops;
+
 #define FLUSH_BEGIN(mm)
 #define FLUSH_END
 #else
-#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
+#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
 #define FLUSH_END      }
 #endif
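
These paired macros let each flush routine skip its body on UP kernels when the address space never received a hardware context; on SMP they expand to nothing, since the SMP wrappers further down do their own mm->context check. A minimal usage sketch (the function name is illustrative):

	static void example_flush_tlb_mm(struct mm_struct *mm)
	{
		FLUSH_BEGIN(mm)			/* UP: if (mm->context != NO_CONTEXT) { */
		srmmu_flush_whole_tlb();	/* cpu-specific flush body */
		FLUSH_END			/* UP: closing brace; SMP: empty */
	}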
 
-BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
-
 int flush_page_for_dma_global = 1;
 
-#ifdef CONFIG_SMP
-BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
-#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
-#endif
-
 char *srmmu_name;
 
 ctxd_t *srmmu_ctx_table_phys;
@@ -91,28 +87,6 @@ static DEFINE_SPINLOCK(srmmu_context_spinlock);
 
 static int is_hypersparc;
 
-/*
- * In general all page table modifications should use the V8 atomic
- * swap instruction.  This insures the mmu and the cpu are in sync
- * with respect to ref/mod bits in the page tables.
- */
-static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
-{
-       __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
-       return value;
-}
-
-static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
-{
-       srmmu_swap((unsigned long *)ptep, pte_val(pteval));
-}
-
-/* The very generic SRMMU page table operations. */
-static inline int srmmu_device_memory(unsigned long x)
-{
-       return ((x & 0xF0000000) != 0);
-}
-
 static int srmmu_cache_pagetables;
 
 /* these will be initialized in srmmu_nocache_calcsize() */
@@ -129,67 +103,12 @@ void *srmmu_nocache_pool;
 void *srmmu_nocache_bitmap;
 static struct bit_map srmmu_nocache_map;
 
-static unsigned long srmmu_pte_pfn(pte_t pte)
-{
-       if (srmmu_device_memory(pte_val(pte))) {
-               /* Just return something that will cause
-                * pfn_valid() to return false.  This makes
-                * copy_one_pte() to just directly copy to
-                * PTE over.
-                */
-               return ~0UL;
-       }
-       return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
-}
-
-static struct page *srmmu_pmd_page(pmd_t pmd)
-{
-
-       if (srmmu_device_memory(pmd_val(pmd)))
-               BUG();
-       return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
-}
-
-static inline unsigned long srmmu_pgd_page(pgd_t pgd)
-{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
-
-
 static inline int srmmu_pte_none(pte_t pte)
 { return !(pte_val(pte) & 0xFFFFFFF); }
 
-static inline int srmmu_pte_present(pte_t pte)
-{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
-
-static inline void srmmu_pte_clear(pte_t *ptep)
-{ srmmu_set_pte(ptep, __pte(0)); }
-
 static inline int srmmu_pmd_none(pmd_t pmd)
 { return !(pmd_val(pmd) & 0xFFFFFFF); }
 
-static inline int srmmu_pmd_bad(pmd_t pmd)
-{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
-
-static inline int srmmu_pmd_present(pmd_t pmd)
-{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-
-static inline void srmmu_pmd_clear(pmd_t *pmdp) {
-       int i;
-       for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
-               srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
-}
-
-static inline int srmmu_pgd_none(pgd_t pgd)          
-{ return !(pgd_val(pgd) & 0xFFFFFFF); }
-
-static inline int srmmu_pgd_bad(pgd_t pgd)
-{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
-
-static inline int srmmu_pgd_present(pgd_t pgd)
-{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-
-static inline void srmmu_pgd_clear(pgd_t * pgdp)
-{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }
-
 static inline pte_t srmmu_pte_wrprotect(pte_t pte)
 { return __pte(pte_val(pte) & ~SRMMU_WRITE);}
 
@@ -199,55 +118,30 @@ static inline pte_t srmmu_pte_mkclean(pte_t pte)
 static inline pte_t srmmu_pte_mkold(pte_t pte)
 { return __pte(pte_val(pte) & ~SRMMU_REF);}
 
-static inline pte_t srmmu_pte_mkwrite(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_WRITE);}
-
-static inline pte_t srmmu_pte_mkdirty(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
-
-static inline pte_t srmmu_pte_mkyoung(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_REF);}
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
-{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
-
-static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
-{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
-
-static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
-{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
-
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
-
-static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
+{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
 
-static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
+void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
        unsigned long ptp;      /* Physical address, shifted right by 4 */
        int i;
 
        ptp = __nocache_pa((unsigned long) ptep) >> 4;
        for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-               srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+               set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
                ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
        }
 }
 
-static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
+void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 {
        unsigned long ptp;      /* Physical address, shifted right by 4 */
        int i;
 
        ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);      /* watch for overflow */
        for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-               srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+               set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
                ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
        }
 }
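
Both pmd_set() and pmd_populate() fan a single software pmd entry out over several hardware page tables, storing one SRMMU page-table descriptor (physical address >> 4, tagged SRMMU_ET_PTD) per hardware-sized chunk. A worked example of the loop arithmetic, assuming the sparc32 constants of this era (treat the concrete numbers as assumptions):

	/* With PTRS_PER_PTE == 1024, SRMMU_REAL_PTRS_PER_PTE == 64 and
	 * sizeof(pte_t) == 4, each loop writes 1024/64 == 16 descriptors
	 * into pmdp->pmdv[], and ptp advances by (64 * 4) >> 4 == 16 per
	 * iteration, i.e. one 256-byte hardware table per chunk.
	 */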
@@ -259,15 +153,8 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
 
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
-{
-       return (pmd_t *) srmmu_pgd_page(*dir) +
-           ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-}
-
 /* Find an entry in the third-level page table.. */ 
-static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
+pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
        void *pte;
 
@@ -276,23 +163,6 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
            ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 }
 
-static unsigned long srmmu_swp_type(swp_entry_t entry)
-{
-       return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
-}
-
-static unsigned long srmmu_swp_offset(swp_entry_t entry)
-{
-       return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
-}
-
-static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
-{
-       return (swp_entry_t) {
-                 (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
-               | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
-}
-
 /*
  * size: bytes to allocate in the nocache area.
  * align: bytes, number to align at.
@@ -325,7 +195,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
        return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
 }
 
-static unsigned long srmmu_get_nocache(int size, int align)
+unsigned long srmmu_get_nocache(int size, int align)
 {
        unsigned long tmp;
 
@@ -337,7 +207,7 @@ static unsigned long srmmu_get_nocache(int size, int align)
        return tmp;
 }
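
A hedged usage sketch of the allocator pair (PTE_SIZE is the same constant this file already passes for page-table allocations; error handling is elided):

	pte_t *ptep = (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
	if (ptep) {
		/* ... link it into a pmd via pmd_set() ... */
		srmmu_free_nocache((unsigned long)ptep, PTE_SIZE);
	}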
 
-static void srmmu_free_nocache(unsigned long vaddr, int size)
+void srmmu_free_nocache(unsigned long vaddr, int size)
 {
        int offset;
 
@@ -429,15 +299,15 @@ static void __init srmmu_nocache_init(void)
 
        while (vaddr < srmmu_nocache_end) {
                pgd = pgd_offset_k(vaddr);
-               pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
-               pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);
+               pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+               pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
 
                pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
 
                if (srmmu_cache_pagetables)
                        pteval |= SRMMU_CACHE;
 
-               srmmu_set_pte(__nocache_fix(pte), __pte(pteval));
+               set_pte(__nocache_fix(pte), __pte(pteval));
 
                vaddr += PAGE_SIZE;
                paddr += PAGE_SIZE;
@@ -447,7 +317,7 @@ static void __init srmmu_nocache_init(void)
        flush_tlb_all();
 }
 
-static inline pgd_t *srmmu_get_pgd_fast(void)
+pgd_t *get_pgd_fast(void)
 {
        pgd_t *pgd = NULL;
 
@@ -462,21 +332,6 @@ static inline pgd_t *srmmu_get_pgd_fast(void)
        return pgd;
 }
 
-static void srmmu_free_pgd_fast(pgd_t *pgd)
-{
-       srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
-}
-
-static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-       return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
-}
-
-static void srmmu_pmd_free(pmd_t * pmd)
-{
-       srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
-}
-
 /*
  * Hardware needs alignment to 256 only, but we align to whole page size
  * to reduce fragmentation problems due to the buddy principle.
@@ -485,31 +340,19 @@ static void srmmu_pmd_free(pmd_t * pmd)
  * Alignments up to the page size are the same for physical and virtual
  * addresses of the nocache area.
  */
-static pte_t *
-srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-       return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
-}
-
-static pgtable_t
-srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        unsigned long pte;
        struct page *page;
 
-       if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
+       if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
                return NULL;
        page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
        pgtable_page_ctor(page);
        return page;
 }
 
-static void srmmu_free_pte_fast(pte_t *pte)
-{
-       srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
-}
-
-static void srmmu_pte_free(pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
        unsigned long p;
 
@@ -560,8 +403,8 @@ static inline void free_context(int context)
 }
 
 
-static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
-    struct task_struct *tsk, int cpu)
+void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+              struct task_struct *tsk)
 {
        if(mm->context == NO_CONTEXT) {
                spin_lock(&srmmu_context_spinlock);
@@ -590,8 +433,8 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
 
        physaddr &= PAGE_MASK;
        pgdp = pgd_offset_k(virt_addr);
-       pmdp = srmmu_pmd_offset(pgdp, virt_addr);
-       ptep = srmmu_pte_offset(pmdp, virt_addr);
+       pmdp = pmd_offset(pgdp, virt_addr);
+       ptep = pte_offset_kernel(pmdp, virt_addr);
        tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 
        /*
@@ -602,11 +445,11 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
        tmp |= (bus_type << 28);
        tmp |= SRMMU_PRIV;
        __flush_page_to_ram(virt_addr);
-       srmmu_set_pte(ptep, __pte(tmp));
+       set_pte(ptep, __pte(tmp));
 }
 
-static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
-    unsigned long xva, unsigned int len)
+void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
+                     unsigned long xva, unsigned int len)
 {
        while (len != 0) {
                len -= PAGE_SIZE;
@@ -624,14 +467,14 @@ static inline void srmmu_unmapioaddr(unsigned long virt_addr)
        pte_t *ptep;
 
        pgdp = pgd_offset_k(virt_addr);
-       pmdp = srmmu_pmd_offset(pgdp, virt_addr);
-       ptep = srmmu_pte_offset(pmdp, virt_addr);
+       pmdp = pmd_offset(pgdp, virt_addr);
+       ptep = pte_offset_kernel(pmdp, virt_addr);
 
        /* No need to flush uncacheable page. */
-       srmmu_pte_clear(ptep);
+       __pte_clear(ptep);
 }
 
-static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
+void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
 {
        while (len != 0) {
                len -= PAGE_SIZE;
@@ -647,10 +490,9 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
  * pool.  As a side effect we are putting a little too much pressure
  * on the gfp() subsystem.  This setup also makes the logic of the
  * iommu mapping code a lot easier as we can transparently handle
- * mappings on the kernel stack without any special code as we did
- * need on the sun4c.
+ * mappings on the kernel stack without any special code.
  */
-static struct thread_info *srmmu_alloc_thread_info_node(int node)
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
 {
        struct thread_info *ret;
 
@@ -664,7 +506,7 @@ static struct thread_info *srmmu_alloc_thread_info_node(int node)
        return ret;
 }
 
-static void srmmu_free_thread_info(struct thread_info *ti)
+void free_thread_info(struct thread_info *ti)
 {
        free_pages((unsigned long)ti, THREAD_INFO_ORDER);
 }
@@ -683,38 +525,6 @@ extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long st
 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void tsunami_setup_blockops(void);
 
-/*
- * Workaround, until we find what's going on with Swift. When low on memory,
- * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
- * out it is already in page tables/ fault again on the same instruction.
- * I really don't understand it, have checked it and contexts
- * are right, flush_tlb_all is done as well, and it faults again...
- * Strange. -jj
- *
- * The following code is a deadwood that may be necessary when
- * we start to make precise page flushes again. --zaitcev
- */
-static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
-{
-#if 0
-       static unsigned long last;
-       unsigned int val;
-       /* unsigned int n; */
-
-       if (address == last) {
-               val = srmmu_hwprobe(address);
-               if (val != 0 && pte_val(*ptep) != val) {
-                       printk("swift_update_mmu_cache: "
-                           "addr %lx put %08x probed %08x from %p\n",
-                           address, pte_val(*ptep), val,
-                           __builtin_return_address(0));
-                       srmmu_flush_whole_tlb();
-               }
-       }
-       last = address;
-#endif
-}
-
 /* swift.S */
 extern void swift_flush_cache_all(void);
 extern void swift_flush_cache_mm(struct mm_struct *mm);
@@ -767,244 +577,6 @@ void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
  * with respect to cache coherency.
  */
 
-/* Cypress flushes. */
-static void cypress_flush_cache_all(void)
-{
-       volatile unsigned long cypress_sucks;
-       unsigned long faddr, tagval;
-
-       flush_user_windows();
-       for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
-               __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
-                                    "=r" (tagval) :
-                                    "r" (faddr), "r" (0x40000),
-                                    "i" (ASI_M_DATAC_TAG));
-
-               /* If modified and valid, kick it. */
-               if((tagval & 0x60) == 0x60)
-                       cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
-       }
-}
-
-static void cypress_flush_cache_mm(struct mm_struct *mm)
-{
-       register unsigned long a, b, c, d, e, f, g;
-       unsigned long flags, faddr;
-       int octx;
-
-       FLUSH_BEGIN(mm)
-       flush_user_windows();
-       local_irq_save(flags);
-       octx = srmmu_get_context();
-       srmmu_set_context(mm->context);
-       a = 0x20; b = 0x40; c = 0x60;
-       d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-       faddr = (0x10000 - 0x100);
-       goto inside;
-       do {
-               faddr -= 0x100;
-       inside:
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                    "sta %%g0, [%0 + %2] %1\n\t"
-                                    "sta %%g0, [%0 + %3] %1\n\t"
-                                    "sta %%g0, [%0 + %4] %1\n\t"
-                                    "sta %%g0, [%0 + %5] %1\n\t"
-                                    "sta %%g0, [%0 + %6] %1\n\t"
-                                    "sta %%g0, [%0 + %7] %1\n\t"
-                                    "sta %%g0, [%0 + %8] %1\n\t" : :
-                                    "r" (faddr), "i" (ASI_M_FLUSH_CTX),
-                                    "r" (a), "r" (b), "r" (c), "r" (d),
-                                    "r" (e), "r" (f), "r" (g));
-       } while(faddr);
-       srmmu_set_context(octx);
-       local_irq_restore(flags);
-       FLUSH_END
-}
-
-static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       register unsigned long a, b, c, d, e, f, g;
-       unsigned long flags, faddr;
-       int octx;
-
-       FLUSH_BEGIN(mm)
-       flush_user_windows();
-       local_irq_save(flags);
-       octx = srmmu_get_context();
-       srmmu_set_context(mm->context);
-       a = 0x20; b = 0x40; c = 0x60;
-       d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-       start &= SRMMU_REAL_PMD_MASK;
-       while(start < end) {
-               faddr = (start + (0x10000 - 0x100));
-               goto inside;
-               do {
-                       faddr -= 0x100;
-               inside:
-                       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                            "sta %%g0, [%0 + %2] %1\n\t"
-                                            "sta %%g0, [%0 + %3] %1\n\t"
-                                            "sta %%g0, [%0 + %4] %1\n\t"
-                                            "sta %%g0, [%0 + %5] %1\n\t"
-                                            "sta %%g0, [%0 + %6] %1\n\t"
-                                            "sta %%g0, [%0 + %7] %1\n\t"
-                                            "sta %%g0, [%0 + %8] %1\n\t" : :
-                                            "r" (faddr),
-                                            "i" (ASI_M_FLUSH_SEG),
-                                            "r" (a), "r" (b), "r" (c), "r" (d),
-                                            "r" (e), "r" (f), "r" (g));
-               } while (faddr != start);
-               start += SRMMU_REAL_PMD_SIZE;
-       }
-       srmmu_set_context(octx);
-       local_irq_restore(flags);
-       FLUSH_END
-}
-
-static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
-       register unsigned long a, b, c, d, e, f, g;
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long flags, line;
-       int octx;
-
-       FLUSH_BEGIN(mm)
-       flush_user_windows();
-       local_irq_save(flags);
-       octx = srmmu_get_context();
-       srmmu_set_context(mm->context);
-       a = 0x20; b = 0x40; c = 0x60;
-       d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-       page &= PAGE_MASK;
-       line = (page + PAGE_SIZE) - 0x100;
-       goto inside;
-       do {
-               line -= 0x100;
-       inside:
-                       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                            "sta %%g0, [%0 + %2] %1\n\t"
-                                            "sta %%g0, [%0 + %3] %1\n\t"
-                                            "sta %%g0, [%0 + %4] %1\n\t"
-                                            "sta %%g0, [%0 + %5] %1\n\t"
-                                            "sta %%g0, [%0 + %6] %1\n\t"
-                                            "sta %%g0, [%0 + %7] %1\n\t"
-                                            "sta %%g0, [%0 + %8] %1\n\t" : :
-                                            "r" (line),
-                                            "i" (ASI_M_FLUSH_PAGE),
-                                            "r" (a), "r" (b), "r" (c), "r" (d),
-                                            "r" (e), "r" (f), "r" (g));
-       } while(line != page);
-       srmmu_set_context(octx);
-       local_irq_restore(flags);
-       FLUSH_END
-}
-
-/* Cypress is copy-back, at least that is how we configure it. */
-static void cypress_flush_page_to_ram(unsigned long page)
-{
-       register unsigned long a, b, c, d, e, f, g;
-       unsigned long line;
-
-       a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-       page &= PAGE_MASK;
-       line = (page + PAGE_SIZE) - 0x100;
-       goto inside;
-       do {
-               line -= 0x100;
-       inside:
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                    "sta %%g0, [%0 + %2] %1\n\t"
-                                    "sta %%g0, [%0 + %3] %1\n\t"
-                                    "sta %%g0, [%0 + %4] %1\n\t"
-                                    "sta %%g0, [%0 + %5] %1\n\t"
-                                    "sta %%g0, [%0 + %6] %1\n\t"
-                                    "sta %%g0, [%0 + %7] %1\n\t"
-                                    "sta %%g0, [%0 + %8] %1\n\t" : :
-                                    "r" (line),
-                                    "i" (ASI_M_FLUSH_PAGE),
-                                    "r" (a), "r" (b), "r" (c), "r" (d),
-                                    "r" (e), "r" (f), "r" (g));
-       } while(line != page);
-}
-
-/* Cypress is also IO cache coherent. */
-static void cypress_flush_page_for_dma(unsigned long page)
-{
-}
-
-/* Cypress has unified L2 VIPT, from which both instructions and data
- * are stored.  It does not have an onboard icache of any sort, therefore
- * no flush is necessary.
- */
-static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
-{
-}
-
-static void cypress_flush_tlb_all(void)
-{
-       srmmu_flush_whole_tlb();
-}
-
-static void cypress_flush_tlb_mm(struct mm_struct *mm)
-{
-       FLUSH_BEGIN(mm)
-       __asm__ __volatile__(
-       "lda    [%0] %3, %%g5\n\t"
-       "sta    %2, [%0] %3\n\t"
-       "sta    %%g0, [%1] %4\n\t"
-       "sta    %%g5, [%0] %3\n"
-       : /* no outputs */
-       : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
-         "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
-       : "g5");
-       FLUSH_END
-}
-
-static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long size;
-
-       FLUSH_BEGIN(mm)
-       start &= SRMMU_PGDIR_MASK;
-       size = SRMMU_PGDIR_ALIGN(end) - start;
-       __asm__ __volatile__(
-               "lda    [%0] %5, %%g5\n\t"
-               "sta    %1, [%0] %5\n"
-               "1:\n\t"
-               "subcc  %3, %4, %3\n\t"
-               "bne    1b\n\t"
-               " sta   %%g0, [%2 + %3] %6\n\t"
-               "sta    %%g5, [%0] %5\n"
-       : /* no outputs */
-       : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
-         "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
-         "i" (ASI_M_FLUSH_PROBE)
-       : "g5", "cc");
-       FLUSH_END
-}
-
-static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-       struct mm_struct *mm = vma->vm_mm;
-
-       FLUSH_BEGIN(mm)
-       __asm__ __volatile__(
-       "lda    [%0] %3, %%g5\n\t"
-       "sta    %1, [%0] %3\n\t"
-       "sta    %%g0, [%2] %4\n\t"
-       "sta    %%g5, [%0] %3\n"
-       : /* no outputs */
-       : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
-         "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
-       : "g5");
-       FLUSH_END
-}
-
 /* viking.S */
 extern void viking_flush_cache_all(void);
 extern void viking_flush_cache_mm(struct mm_struct *mm);
@@ -1065,21 +637,21 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 
        while(start < end) {
                pgdp = pgd_offset_k(start);
-               if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+               if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
                        pmdp = (pmd_t *) __srmmu_get_nocache(
                            SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-                       srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+                       pgd_set(__nocache_fix(pgdp), pmdp);
                }
-               pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+               pmdp = pmd_offset(__nocache_fix(pgdp), start);
                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                        ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
-                       srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+                       pmd_set(__nocache_fix(pmdp), ptep);
                }
                if (start > (0xffffffffUL - PMD_SIZE))
                        break;
@@ -1096,21 +668,21 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
 
        while(start < end) {
                pgdp = pgd_offset_k(start);
-               if(srmmu_pgd_none(*pgdp)) {
+               if (pgd_none(*pgdp)) {
                        pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
-                       srmmu_pgd_set(pgdp, pmdp);
+                       pgd_set(pgdp, pmdp);
                }
-               pmdp = srmmu_pmd_offset(pgdp, start);
+               pmdp = pmd_offset(pgdp, start);
                if(srmmu_pmd_none(*pmdp)) {
                        ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
                                                             PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(ptep, 0, PTE_SIZE);
-                       srmmu_pmd_set(pmdp, ptep);
+                       pmd_set(pmdp, ptep);
                }
                if (start > (0xffffffffUL - PMD_SIZE))
                        break;
@@ -1162,21 +734,21 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        start += SRMMU_PGDIR_SIZE;
                        continue;
                }
-               if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+               if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
                        pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-                       srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+                       pgd_set(__nocache_fix(pgdp), pmdp);
                }
-               pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+               pmdp = pmd_offset(__nocache_fix(pgdp), start);
                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                        ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
                                                             PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
-                       srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+                       pmd_set(__nocache_fix(pmdp), ptep);
                }
                if(what == 1) {
                        /*
@@ -1190,7 +762,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        start += SRMMU_REAL_PMD_SIZE;
                        continue;
                }
-               ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
+               ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
                *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
                start += PAGE_SIZE;
        }
@@ -1249,8 +821,6 @@ static inline void map_kernel(void)
        for (i = 0; sp_banks[i].num_bytes != 0; i++) {
                map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
        }
-
-       BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
 }
 
 /* Paging initialization on the Sparc Reference MMU. */
@@ -1312,7 +882,7 @@ void __init srmmu_paging_init(void)
        srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
 #ifdef CONFIG_SMP
        /* Stop from hanging here... */
-       local_flush_tlb_all();
+       local_ops->tlb_all();
 #else
        flush_tlb_all();
 #endif
@@ -1326,8 +896,8 @@ void __init srmmu_paging_init(void)
        srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
 
        pgd = pgd_offset_k(PKMAP_BASE);
-       pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
-       pte = srmmu_pte_offset(pmd, PKMAP_BASE);
+       pmd = pmd_offset(pgd, PKMAP_BASE);
+       pte = pte_offset_kernel(pmd, PKMAP_BASE);
        pkmap_page_table = pte;
 
        flush_cache_all();
@@ -1359,7 +929,7 @@ void __init srmmu_paging_init(void)
        }
 }
 
-static void srmmu_mmu_info(struct seq_file *m)
+void mmu_info(struct seq_file *m)
 {
        seq_printf(m, 
                   "MMU type\t: %s\n"
@@ -1372,11 +942,7 @@ static void srmmu_mmu_info(struct seq_file *m)
                   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
 }
 
-static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
-{
-}
-
-static void srmmu_destroy_context(struct mm_struct *mm)
+void destroy_context(struct mm_struct *mm)
 {
 
        if(mm->context != NO_CONTEXT) {
@@ -1474,6 +1040,20 @@ static void __cpuinit poke_hypersparc(void)
        clear = srmmu_get_fstatus();
 }
 
+static const struct sparc32_cachetlb_ops hypersparc_ops = {
+       .cache_all      = hypersparc_flush_cache_all,
+       .cache_mm       = hypersparc_flush_cache_mm,
+       .cache_page     = hypersparc_flush_cache_page,
+       .cache_range    = hypersparc_flush_cache_range,
+       .tlb_all        = hypersparc_flush_tlb_all,
+       .tlb_mm         = hypersparc_flush_tlb_mm,
+       .tlb_page       = hypersparc_flush_tlb_page,
+       .tlb_range      = hypersparc_flush_tlb_range,
+       .page_to_ram    = hypersparc_flush_page_to_ram,
+       .sig_insns      = hypersparc_flush_sig_insns,
+       .page_for_dma   = hypersparc_flush_page_for_dma,
+};
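
This table, like the per-cpu ones below, replaces BTFIXUP branch patching with ordinary indirect calls: generic code dispatches through the sparc32_cachetlb_ops pointer set at init time. A minimal sketch of the caller side, assuming wrapper macros in the cache/TLB flush headers (where exactly the macros live is an assumption):

	#define flush_cache_all()	sparc32_cachetlb_ops->cache_all()
	#define flush_tlb_mm(mm)	sparc32_cachetlb_ops->tlb_mm(mm)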
+
 static void __init init_hypersparc(void)
 {
        srmmu_name = "ROSS HyperSparc";
@@ -1482,118 +1062,13 @@ static void __init init_hypersparc(void)
        init_vac_layout();
 
        is_hypersparc = 1;
-
-       BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
-
+       sparc32_cachetlb_ops = &hypersparc_ops;
 
        poke_srmmu = poke_hypersparc;
 
        hypersparc_setup_blockops();
 }
 
-static void __cpuinit poke_cypress(void)
-{
-       unsigned long mreg = srmmu_get_mmureg();
-       unsigned long faddr, tagval;
-       volatile unsigned long cypress_sucks;
-       volatile unsigned long clear;
-
-       clear = srmmu_get_faddr();
-       clear = srmmu_get_fstatus();
-
-       if (!(mreg & CYPRESS_CENABLE)) {
-               for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
-                       __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
-                                            "sta %%g0, [%0] %2\n\t" : :
-                                            "r" (faddr), "r" (0x40000),
-                                            "i" (ASI_M_DATAC_TAG));
-               }
-       } else {
-               for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
-                       __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
-                                            "=r" (tagval) :
-                                            "r" (faddr), "r" (0x40000),
-                                            "i" (ASI_M_DATAC_TAG));
-
-                       /* If modified and valid, kick it. */
-                       if((tagval & 0x60) == 0x60)
-                               cypress_sucks = *(unsigned long *)
-                                                       (0xf0020000 + faddr);
-               }
-       }
-
-       /* And one more, for our good neighbor, Mr. Broken Cypress. */
-       clear = srmmu_get_faddr();
-       clear = srmmu_get_fstatus();
-
-       mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
-       srmmu_set_mmureg(mreg);
-}
-
-static void __init init_cypress_common(void)
-{
-       init_vac_layout();
-
-       BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
-
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
-
-       poke_srmmu = poke_cypress;
-}
-
-static void __init init_cypress_604(void)
-{
-       srmmu_name = "ROSS Cypress-604(UP)";
-       srmmu_modtype = Cypress;
-       init_cypress_common();
-}
-
-static void __init init_cypress_605(unsigned long mrev)
-{
-       srmmu_name = "ROSS Cypress-605(MP)";
-       if(mrev == 0xe) {
-               srmmu_modtype = Cypress_vE;
-               hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
-       } else {
-               if(mrev == 0xd) {
-                       srmmu_modtype = Cypress_vD;
-                       hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
-               } else {
-                       srmmu_modtype = Cypress;
-               }
-       }
-       init_cypress_common();
-}
-
 static void __cpuinit poke_swift(void)
 {
        unsigned long mreg;
@@ -1617,6 +1092,20 @@ static void __cpuinit poke_swift(void)
        srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops swift_ops = {
+       .cache_all      = swift_flush_cache_all,
+       .cache_mm       = swift_flush_cache_mm,
+       .cache_page     = swift_flush_cache_page,
+       .cache_range    = swift_flush_cache_range,
+       .tlb_all        = swift_flush_tlb_all,
+       .tlb_mm         = swift_flush_tlb_mm,
+       .tlb_page       = swift_flush_tlb_page,
+       .tlb_range      = swift_flush_tlb_range,
+       .page_to_ram    = swift_flush_page_to_ram,
+       .sig_insns      = swift_flush_sig_insns,
+       .page_for_dma   = swift_flush_page_for_dma,
+};
+
 #define SWIFT_MASKID_ADDR  0x10003018
 static void __init init_swift(void)
 {
@@ -1667,23 +1156,7 @@ static void __init init_swift(void)
                break;
        }
 
-       BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-       BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = &swift_ops;
        flush_page_for_dma_global = 0;
 
        /*
@@ -1816,26 +1289,25 @@ static void __cpuinit poke_turbosparc(void)
        srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops turbosparc_ops = {
+       .cache_all      = turbosparc_flush_cache_all,
+       .cache_mm       = turbosparc_flush_cache_mm,
+       .cache_page     = turbosparc_flush_cache_page,
+       .cache_range    = turbosparc_flush_cache_range,
+       .tlb_all        = turbosparc_flush_tlb_all,
+       .tlb_mm         = turbosparc_flush_tlb_mm,
+       .tlb_page       = turbosparc_flush_tlb_page,
+       .tlb_range      = turbosparc_flush_tlb_range,
+       .page_to_ram    = turbosparc_flush_page_to_ram,
+       .sig_insns      = turbosparc_flush_sig_insns,
+       .page_for_dma   = turbosparc_flush_page_for_dma,
+};
+
 static void __init init_turbosparc(void)
 {
        srmmu_name = "Fujitsu TurboSparc";
        srmmu_modtype = TurboSparc;
-
-       BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = &turbosparc_ops;
        poke_srmmu = poke_turbosparc;
 }
 
@@ -1850,6 +1322,20 @@ static void __cpuinit poke_tsunami(void)
        srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops tsunami_ops = {
+       .cache_all      = tsunami_flush_cache_all,
+       .cache_mm       = tsunami_flush_cache_mm,
+       .cache_page     = tsunami_flush_cache_page,
+       .cache_range    = tsunami_flush_cache_range,
+       .tlb_all        = tsunami_flush_tlb_all,
+       .tlb_mm         = tsunami_flush_tlb_mm,
+       .tlb_page       = tsunami_flush_tlb_page,
+       .tlb_range      = tsunami_flush_tlb_range,
+       .page_to_ram    = tsunami_flush_page_to_ram,
+       .sig_insns      = tsunami_flush_sig_insns,
+       .page_for_dma   = tsunami_flush_page_for_dma,
+};
+
 static void __init init_tsunami(void)
 {
        /*
@@ -1860,22 +1346,7 @@ static void __init init_tsunami(void)
 
        srmmu_name = "TI Tsunami";
        srmmu_modtype = Tsunami;
-
-       BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-       BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = &tsunami_ops;
        poke_srmmu = poke_tsunami;
 
        tsunami_setup_blockops();
@@ -1886,7 +1357,7 @@ static void __cpuinit poke_viking(void)
        unsigned long mreg = srmmu_get_mmureg();
        static int smp_catch;
 
-       if(viking_mxcc_present) {
+       if (viking_mxcc_present) {
                unsigned long mxcc_control = mxcc_get_creg();
 
                mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
@@ -1923,6 +1394,52 @@ static void __cpuinit poke_viking(void)
        srmmu_set_mmureg(mreg);
 }
 
+static struct sparc32_cachetlb_ops viking_ops = {
+       .cache_all      = viking_flush_cache_all,
+       .cache_mm       = viking_flush_cache_mm,
+       .cache_page     = viking_flush_cache_page,
+       .cache_range    = viking_flush_cache_range,
+       .tlb_all        = viking_flush_tlb_all,
+       .tlb_mm         = viking_flush_tlb_mm,
+       .tlb_page       = viking_flush_tlb_page,
+       .tlb_range      = viking_flush_tlb_range,
+       .page_to_ram    = viking_flush_page_to_ram,
+       .sig_insns      = viking_flush_sig_insns,
+       .page_for_dma   = viking_flush_page_for_dma,
+};
+
+#ifdef CONFIG_SMP
+/* On sun4d the cpu broadcasts local TLB flushes, so we can just
+ * perform the local TLB flush and all the other cpus will see it.
+ * But, unfortunately, there is a bug in the sun4d XBUS backplane
+ * that requires that we add some synchronization to these flushes.
+ *
+ * The bug is that the fifo which keeps track of all the pending TLB
+ * broadcasts in the system is an entry or two too small, so if we
+ * have too many going at once we'll overflow that fifo and lose a TLB
+ * flush resulting in corruption.
+ *
+ * Our workaround is to take a global spinlock around the TLB flushes,
+ * which guarantees we won't ever have too many pending.  It's a big
+ * hammer, but a semaphore like system to make sure we only have N TLB
+ * flushes going at once will require SMP locking anyways so there's
+ * no real value in trying any harder than this.
+ */
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
+       .cache_all      = viking_flush_cache_all,
+       .cache_mm       = viking_flush_cache_mm,
+       .cache_page     = viking_flush_cache_page,
+       .cache_range    = viking_flush_cache_range,
+       .tlb_all        = sun4dsmp_flush_tlb_all,
+       .tlb_mm         = sun4dsmp_flush_tlb_mm,
+       .tlb_page       = sun4dsmp_flush_tlb_page,
+       .tlb_range      = sun4dsmp_flush_tlb_range,
+       .page_to_ram    = viking_flush_page_to_ram,
+       .sig_insns      = viking_flush_sig_insns,
+       .page_for_dma   = viking_flush_page_for_dma,
+};
+#endif
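
A hedged sketch of the shape the comment above implies for the sun4dsmp_* helpers; the lock name and body are illustrative, not the real implementation:

	static DEFINE_SPINLOCK(sun4d_tlb_flush_lock);	/* hypothetical */

	static void sun4dsmp_flush_tlb_all_sketch(void)
	{
		spin_lock(&sun4d_tlb_flush_lock);	/* bound pending broadcasts */
		viking_flush_tlb_all();			/* hw broadcasts the flush */
		spin_unlock(&sun4d_tlb_flush_lock);
	}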
+
 static void __init init_viking(void)
 {
        unsigned long mreg = srmmu_get_mmureg();
@@ -1933,10 +1450,6 @@ static void __init init_viking(void)
                viking_mxcc_present = 0;
                msi_set_sync();
 
-               BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
-
                /*
                 * We need this to make sure old viking takes no hits
                 * on it's cache for dma snoops to workaround the
@@ -1944,76 +1457,101 @@ static void __init init_viking(void)
                 * This is only necessary because of the new way in
                 * which we use the IOMMU.
                 */
-               BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
-
+               viking_ops.page_for_dma = viking_flush_page;
+#ifdef CONFIG_SMP
+               viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
+#endif
                flush_page_for_dma_global = 0;
        } else {
                srmmu_name = "TI Viking/MXCC";
                viking_mxcc_present = 1;
-
                srmmu_cache_pagetables = 1;
-
-               /* MXCC vikings lack the DMA snooping bug. */
-               BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
        }
 
-       BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+               &viking_ops;
 #ifdef CONFIG_SMP
-       if (sparc_cpu_model == sun4d) {
-               BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
-       } else
+       if (sparc_cpu_model == sun4d)
+               sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+                       &viking_sun4d_smp_ops;
 #endif
-       {
-               BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
-       }
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
 
        poke_srmmu = poke_viking;
 }
 
 #ifdef CONFIG_SPARC_LEON
+static void leon_flush_cache_mm(struct mm_struct *mm)
+{
+       leon_flush_cache_all();
+}
 
-void __init poke_leonsparc(void)
+static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 {
+       leon_flush_pcache_all(vma, page);
 }
 
-void __init init_leon(void)
+static void leon_flush_cache_range(struct vm_area_struct *vma,
+                                  unsigned long start,
+                                  unsigned long end)
 {
+       leon_flush_cache_all();
+}
 
-       srmmu_name = "LEON";
+static void leon_flush_tlb_mm(struct mm_struct *mm)
+{
+       leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_page(struct vm_area_struct *vma,
+                               unsigned long page)
+{
+       leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_range(struct vm_area_struct *vma,
+                                unsigned long start,
+                                unsigned long end)
+{
+       leon_flush_tlb_all();
+}
+
+static void leon_flush_page_to_ram(unsigned long page)
+{
+       leon_flush_cache_all();
+}
+
+static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
+{
+       leon_flush_cache_all();
+}
+
+static void leon_flush_page_for_dma(unsigned long page)
+{
+       leon_flush_dcache_all();
+}
+
+void __init poke_leonsparc(void)
+{
+}
 
-       BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
-                       BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
-                       BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
+static const struct sparc32_cachetlb_ops leon_ops = {
+       .cache_all      = leon_flush_cache_all,
+       .cache_mm       = leon_flush_cache_mm,
+       .cache_page     = leon_flush_cache_page,
+       .cache_range    = leon_flush_cache_range,
+       .tlb_all        = leon_flush_tlb_all,
+       .tlb_mm         = leon_flush_tlb_mm,
+       .tlb_page       = leon_flush_tlb_page,
+       .tlb_range      = leon_flush_tlb_range,
+       .page_to_ram    = leon_flush_page_to_ram,
+       .sig_insns      = leon_flush_sig_insns,
+       .page_for_dma   = leon_flush_page_for_dma,
+};
 
+void __init init_leon(void)
+{
+       srmmu_name = "LEON";
+       sparc32_cachetlb_ops = &leon_ops;
        poke_srmmu = poke_leonsparc;
 
        srmmu_cache_pagetables = 0;
@@ -2052,22 +1590,15 @@ static void __init get_srmmu_type(void)
                        break;
                case 0:
                case 2:
-                       /* Uniprocessor Cypress */
-                       init_cypress_604();
-                       break;
                case 10:
                case 11:
                case 12:
-                       /* _REALLY OLD_ Cypress MP chips... */
                case 13:
                case 14:
                case 15:
-                       /* MP Cypress mmu/cache-controller */
-                       init_cypress_605(mod_rev);
-                       break;
                default:
-                       /* Some other Cypress revision, assume a 605. */
-                       init_cypress_605(mod_rev);
+                       prom_printf("Sparc-Linux Cypress support no longer exists.\n");
+                       prom_halt();
                        break;
                }
                return;
@@ -2123,203 +1654,193 @@ static void __init get_srmmu_type(void)
        srmmu_is_bad();
 }
 
-/* don't laugh, static pagetables */
-static void srmmu_check_pgt_cache(int low, int high)
+#ifdef CONFIG_SMP
+/* Local cross-calls. */
+static void smp_flush_page_for_dma(unsigned long page)
 {
+       xc1((smpfunc_t) local_ops->page_for_dma, page);
+       local_ops->page_for_dma(page);
 }
 
-extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
-       tsetup_mmu_patchme, rtrap_mmu_patchme;
-
-extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
-       tsetup_srmmu_stackchk, srmmu_rett_stackchk;
-
-extern unsigned long srmmu_fault;
-
-#define PATCH_BRANCH(insn, dest) do { \
-               iaddr = &(insn); \
-               daddr = &(dest); \
-               *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
-       } while(0)
-
-static void __init patch_window_trap_handlers(void)
+static void smp_flush_cache_all(void)
 {
-       unsigned long *iaddr, *daddr;
-       
-       PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
-       PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
-       PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
-       PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
-       PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
-       PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
-       PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
+       xc0((smpfunc_t) local_ops->cache_all);
+       local_ops->cache_all();
 }
 
-#ifdef CONFIG_SMP
-/* Local cross-calls. */
-static void smp_flush_page_for_dma(unsigned long page)
+static void smp_flush_tlb_all(void)
 {
-       xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
-       local_flush_page_for_dma(page);
+       xc0((smpfunc_t) local_ops->tlb_all);
+       local_ops->tlb_all();
 }
 
-#endif
-
-static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
+static void smp_flush_cache_mm(struct mm_struct *mm)
 {
-       return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+               local_ops->cache_mm(mm);
+       }
 }
 
-static unsigned long srmmu_pte_to_pgoff(pte_t pte)
+static void smp_flush_tlb_mm(struct mm_struct *mm)
 {
-       return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask)) {
+                       xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+                       if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
+                               cpumask_copy(mm_cpumask(mm),
+                                            cpumask_of(smp_processor_id()));
+               }
+               local_ops->tlb_mm(mm);
+       }
 }
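
The mm-scoped wrappers here and below all share one skeleton: copy mm_cpumask(), drop the calling cpu, cross-call the local handler (xc1/xc2/xc3 carry one to three arguments) only if some other cpu still holds the mm, then run the handler locally. Schematically:

	cpumask_t cpu_mask;
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);	/* not ourselves */
	if (!cpumask_empty(&cpu_mask))
		xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
	local_ops->tlb_mm(mm);		/* the local cpu always flushes */

smp_flush_tlb_mm additionally collapses the mask to the local cpu once the last user is current here, so later flushes skip the broadcast entirely.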
 
-static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
+static void smp_flush_cache_range(struct vm_area_struct *vma,
+                                 unsigned long start,
+                                 unsigned long end)
 {
-       prot &= ~__pgprot(SRMMU_CACHE);
+       struct mm_struct *mm = vma->vm_mm;
 
-       return prot;
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc3((smpfunc_t) local_ops->cache_range,
+                           (unsigned long) vma, start, end);
+               local_ops->cache_range(vma, start, end);
+       }
 }
 
-/* Load up routines and constants for sun4m and sun4d mmu */
-void __init ld_mmu_srmmu(void)
+static void smp_flush_tlb_range(struct vm_area_struct *vma,
+                               unsigned long start,
+                               unsigned long end)
 {
-       extern void ld_mmu_iommu(void);
-       extern void ld_mmu_iounit(void);
-       extern void ___xchg32_sun4md(void);
-
-       BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
-       BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
-       BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
-
-       BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
-       BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
-
-       BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
-       PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
-       BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
-       BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
-       BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
-       page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+       struct mm_struct *mm = vma->vm_mm;
 
-       /* Functions */
-       BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
-#ifndef CONFIG_SMP     
-       BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
-#endif
-       BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc3((smpfunc_t) local_ops->tlb_range,
+                           (unsigned long) vma, start, end);
+               local_ops->tlb_range(vma, start, end);
+       }
+}
 
-       BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
-       BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
+static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+       struct mm_struct *mm = vma->vm_mm;
 
-       BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc2((smpfunc_t) local_ops->cache_page,
+                           (unsigned long) vma, page);
+               local_ops->cache_page(vma, page);
+       }
+}
 
-       BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
+static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+       struct mm_struct *mm = vma->vm_mm;
 
-       BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc2((smpfunc_t) local_ops->tlb_page,
+                           (unsigned long) vma, page);
+               local_ops->tlb_page(vma, page);
+       }
+}
 
-       BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
+static void smp_flush_page_to_ram(unsigned long page)
+{
+       /* Current theory is that those who call this are the ones
+        * who have just dirtied their cache with the page's contents
+        * in kernel space, therefore we only run this on the local cpu.
+        *
+        * XXX This experiment failed, research further... -DaveM
+        */
+#if 1
+       xc1((smpfunc_t) local_ops->page_to_ram, page);
+#endif
+       local_ops->page_to_ram(page);
+}
+
+static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+       cpumask_t cpu_mask;
+       cpumask_copy(&cpu_mask, mm_cpumask(mm));
+       cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+       if (!cpumask_empty(&cpu_mask))
+               xc2((smpfunc_t) local_ops->sig_insns,
+                   (unsigned long) mm, insn_addr);
+       local_ops->sig_insns(mm, insn_addr);
+}
+
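+/*
+ * The SMP ops table: each entry cross-calls the matching cpu-local
+ * operation in local_ops and then runs it locally as well.
+ */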
+static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
+       .cache_all      = smp_flush_cache_all,
+       .cache_mm       = smp_flush_cache_mm,
+       .cache_page     = smp_flush_cache_page,
+       .cache_range    = smp_flush_cache_range,
+       .tlb_all        = smp_flush_tlb_all,
+       .tlb_mm         = smp_flush_tlb_mm,
+       .tlb_page       = smp_flush_tlb_page,
+       .tlb_range      = smp_flush_tlb_range,
+       .page_to_ram    = smp_flush_page_to_ram,
+       .sig_insns      = smp_flush_sig_insns,
+       .page_for_dma   = smp_flush_page_for_dma,
+};
+#endif
 
-       BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
-       
-       BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
-       BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
-       BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
-       BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
-       BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
-       BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
-       BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
-       BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
-       BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
-       BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
-       BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
-       BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);
+/* Load up routines and constants for sun4m and sun4d mmu */
+void __init load_mmu(void)
+{
+       extern void ld_mmu_iommu(void);
+       extern void ld_mmu_iounit(void);
 
+       /* Probe the srmmu type; this also installs the cpu-local
+        * sparc32_cachetlb_ops for the detected cpu.
+        */
        get_srmmu_type();
-       patch_window_trap_handlers();
 
 #ifdef CONFIG_SMP
        /* El switcheroo... */
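+       /* Keep a handle on the cpu-local ops before switching the
+        * global table over to the cross-calling versions below.
+        */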
+       local_ops = sparc32_cachetlb_ops;
 
-       BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
-       BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
-       BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
-       BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
-       BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
-       BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
-       BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
-       BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
-       BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
-       BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
-       BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
-
-       BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
-       if (sparc_cpu_model != sun4d &&
-           sparc_cpu_model != sparc_leon) {
-               BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
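+       /* sun4d and LEON keep the cpu-local tlb ops; only the cache
+        * flushes are cross-called on those models.
+        */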
+       if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
+               smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
+               smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
+               smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
+               smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
        }
-       BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
 
        if (poke_srmmu == poke_viking) {
                /* Avoid unnecessary cross calls. */
-               BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
-               BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
-               BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
-               BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
-               BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
-               BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
-               BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
+               smp_cachetlb_ops.cache_all = local_ops->cache_all;
+               smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
+               smp_cachetlb_ops.cache_range = local_ops->cache_range;
+               smp_cachetlb_ops.cache_page = local_ops->cache_page;
+
+               smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
+               smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
+               smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
        }
+
+       /* It really is const after this point. */
+       sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+               &smp_cachetlb_ops;
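+
+       /*
+        * Callers are expected to dispatch through the table via
+        * wrapper macros in the headers, presumably along the lines
+        * of:
+        *
+        *      #define flush_cache_mm(mm) \
+        *              sparc32_cachetlb_ops->cache_mm(mm)
+        */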
 #endif
 
        if (sparc_cpu_model == sun4d)