sparc32: Remove cypress cpu support.
[linux-block.git] / arch / sparc / mm / srmmu.c
index b01c735b581550c68510cd9ff016528adc0c9035..4875fcd8fd7a7f87f7d3a2e5b4ae7bb818890f5f 100644 (file)
@@ -48,8 +48,6 @@
 #include <asm/turbosparc.h>
 #include <asm/leon.h>
 
-#include <asm/btfixup.h>
-
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
@@ -65,24 +63,20 @@ extern unsigned long last_valid_pfn;
 
 static pgd_t *srmmu_swapper_pg_dir;
 
+const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+
 #ifdef CONFIG_SMP
+const struct sparc32_cachetlb_ops *local_ops;
+
 #define FLUSH_BEGIN(mm)
 #define FLUSH_END
 #else
-#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
+#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
 #define FLUSH_END      }
 #endif
 
-BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
-
 int flush_page_for_dma_global = 1;
 
-#ifdef CONFIG_SMP
-BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
-#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
-#endif
-
 char *srmmu_name;
 
 ctxd_t *srmmu_ctx_table_phys;
@@ -109,10 +103,6 @@ void *srmmu_nocache_pool;
 void *srmmu_nocache_bitmap;
 static struct bit_map srmmu_nocache_map;
 
-static inline unsigned long srmmu_pgd_page(pgd_t pgd)
-{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
-
-
 static inline int srmmu_pte_none(pte_t pte)
 { return !(pte_val(pte) & 0xFFFFFFF); }
 
@@ -128,23 +118,11 @@ static inline pte_t srmmu_pte_mkclean(pte_t pte)
 static inline pte_t srmmu_pte_mkold(pte_t pte)
 { return __pte(pte_val(pte) & ~SRMMU_REF);}
 
-static inline pte_t srmmu_pte_mkwrite(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_WRITE);}
-
-static inline pte_t srmmu_pte_mkdirty(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
-
-static inline pte_t srmmu_pte_mkyoung(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_REF);}
-
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
 { set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
 
-static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
-
-static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
+void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
        unsigned long ptp;      /* Physical address, shifted right by 4 */
        int i;
@@ -156,7 +134,7 @@ static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
        }
 }
 
-static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
+void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 {
        unsigned long ptp;      /* Physical address, shifted right by 4 */
        int i;
@@ -175,15 +153,8 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
 
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
-{
-       return (pmd_t *) srmmu_pgd_page(*dir) +
-           ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-}
-
 /* Find an entry in the third-level page table.. */ 
-static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
+pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
        void *pte;
 
@@ -192,23 +163,6 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
            ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 }
 
-static unsigned long srmmu_swp_type(swp_entry_t entry)
-{
-       return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
-}
-
-static unsigned long srmmu_swp_offset(swp_entry_t entry)
-{
-       return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
-}
-
-static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
-{
-       return (swp_entry_t) {
-                 (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
-               | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
-}
-
 /*
  * size: bytes to allocate in the nocache area.
  * align: bytes, number to align at.
@@ -241,7 +195,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
        return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
 }
 
-static unsigned long srmmu_get_nocache(int size, int align)
+unsigned long srmmu_get_nocache(int size, int align)
 {
        unsigned long tmp;
 
@@ -253,7 +207,7 @@ static unsigned long srmmu_get_nocache(int size, int align)
        return tmp;
 }
 
-static void srmmu_free_nocache(unsigned long vaddr, int size)
+void srmmu_free_nocache(unsigned long vaddr, int size)
 {
        int offset;
 
@@ -345,8 +299,8 @@ static void __init srmmu_nocache_init(void)
 
        while (vaddr < srmmu_nocache_end) {
                pgd = pgd_offset_k(vaddr);
-               pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
-               pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);
+               pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+               pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
 
                pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
 
@@ -363,7 +317,7 @@ static void __init srmmu_nocache_init(void)
        flush_tlb_all();
 }
 
-static inline pgd_t *srmmu_get_pgd_fast(void)
+pgd_t *get_pgd_fast(void)
 {
        pgd_t *pgd = NULL;
 
@@ -378,21 +332,6 @@ static inline pgd_t *srmmu_get_pgd_fast(void)
        return pgd;
 }
 
-static void srmmu_free_pgd_fast(pgd_t *pgd)
-{
-       srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
-}
-
-static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-       return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
-}
-
-static void srmmu_pmd_free(pmd_t * pmd)
-{
-       srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
-}
-
 /*
  * Hardware needs alignment to 256 only, but we align to whole page size
  * to reduce fragmentation problems due to the buddy principle.
@@ -401,31 +340,19 @@ static void srmmu_pmd_free(pmd_t * pmd)
  * Alignments up to the page size are the same for physical and virtual
  * addresses of the nocache area.
  */
-static pte_t *
-srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-       return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
-}
-
-static pgtable_t
-srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        unsigned long pte;
        struct page *page;
 
-       if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
+       if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
                return NULL;
        page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
        pgtable_page_ctor(page);
        return page;
 }
 
-static void srmmu_free_pte_fast(pte_t *pte)
-{
-       srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
-}
-
-static void srmmu_pte_free(pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
        unsigned long p;
 
@@ -506,8 +433,8 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
 
        physaddr &= PAGE_MASK;
        pgdp = pgd_offset_k(virt_addr);
-       pmdp = srmmu_pmd_offset(pgdp, virt_addr);
-       ptep = srmmu_pte_offset(pmdp, virt_addr);
+       pmdp = pmd_offset(pgdp, virt_addr);
+       ptep = pte_offset_kernel(pmdp, virt_addr);
        tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 
        /*
@@ -521,8 +448,8 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
        set_pte(ptep, __pte(tmp));
 }
 
-static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
-    unsigned long xva, unsigned int len)
+void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
+                     unsigned long xva, unsigned int len)
 {
        while (len != 0) {
                len -= PAGE_SIZE;
@@ -540,14 +467,14 @@ static inline void srmmu_unmapioaddr(unsigned long virt_addr)
        pte_t *ptep;
 
        pgdp = pgd_offset_k(virt_addr);
-       pmdp = srmmu_pmd_offset(pgdp, virt_addr);
-       ptep = srmmu_pte_offset(pmdp, virt_addr);
+       pmdp = pmd_offset(pgdp, virt_addr);
+       ptep = pte_offset_kernel(pmdp, virt_addr);
 
        /* No need to flush uncacheable page. */
        __pte_clear(ptep);
 }
 
-static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
+void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
 {
        while (len != 0) {
                len -= PAGE_SIZE;
@@ -598,38 +525,6 @@ extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long st
 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void tsunami_setup_blockops(void);
 
-/*
- * Workaround, until we find what's going on with Swift. When low on memory,
- * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
- * out it is already in page tables/ fault again on the same instruction.
- * I really don't understand it, have checked it and contexts
- * are right, flush_tlb_all is done as well, and it faults again...
- * Strange. -jj
- *
- * The following code is a deadwood that may be necessary when
- * we start to make precise page flushes again. --zaitcev
- */
-static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
-{
-#if 0
-       static unsigned long last;
-       unsigned int val;
-       /* unsigned int n; */
-
-       if (address == last) {
-               val = srmmu_hwprobe(address);
-               if (val != 0 && pte_val(*ptep) != val) {
-                       printk("swift_update_mmu_cache: "
-                           "addr %lx put %08x probed %08x from %pf\n",
-                           address, pte_val(*ptep), val,
-                           __builtin_return_address(0));
-                       srmmu_flush_whole_tlb();
-               }
-       }
-       last = address;
-#endif
-}
-
 /* swift.S */
 extern void swift_flush_cache_all(void);
 extern void swift_flush_cache_mm(struct mm_struct *mm);
@@ -682,244 +577,6 @@ void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
  * with respect to cache coherency.
  */
 
-/* Cypress flushes. */
-static void cypress_flush_cache_all(void)
-{
-       volatile unsigned long cypress_sucks;
-       unsigned long faddr, tagval;
-
-       flush_user_windows();
-       for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
-               __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
-                                    "=r" (tagval) :
-                                    "r" (faddr), "r" (0x40000),
-                                    "i" (ASI_M_DATAC_TAG));
-
-               /* If modified and valid, kick it. */
-               if((tagval & 0x60) == 0x60)
-                       cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
-       }
-}
-
-static void cypress_flush_cache_mm(struct mm_struct *mm)
-{
-       register unsigned long a, b, c, d, e, f, g;
-       unsigned long flags, faddr;
-       int octx;
-
-       FLUSH_BEGIN(mm)
-       flush_user_windows();
-       local_irq_save(flags);
-       octx = srmmu_get_context();
-       srmmu_set_context(mm->context);
-       a = 0x20; b = 0x40; c = 0x60;
-       d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-       faddr = (0x10000 - 0x100);
-       goto inside;
-       do {
-               faddr -= 0x100;
-       inside:
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                    "sta %%g0, [%0 + %2] %1\n\t"
-                                    "sta %%g0, [%0 + %3] %1\n\t"
-                                    "sta %%g0, [%0 + %4] %1\n\t"
-                                    "sta %%g0, [%0 + %5] %1\n\t"
-                                    "sta %%g0, [%0 + %6] %1\n\t"
-                                    "sta %%g0, [%0 + %7] %1\n\t"
-                                    "sta %%g0, [%0 + %8] %1\n\t" : :
-                                    "r" (faddr), "i" (ASI_M_FLUSH_CTX),
-                                    "r" (a), "r" (b), "r" (c), "r" (d),
-                                    "r" (e), "r" (f), "r" (g));
-       } while(faddr);
-       srmmu_set_context(octx);
-       local_irq_restore(flags);
-       FLUSH_END
-}
-
-static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       register unsigned long a, b, c, d, e, f, g;
-       unsigned long flags, faddr;
-       int octx;
-
-       FLUSH_BEGIN(mm)
-       flush_user_windows();
-       local_irq_save(flags);
-       octx = srmmu_get_context();
-       srmmu_set_context(mm->context);
-       a = 0x20; b = 0x40; c = 0x60;
-       d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-       start &= SRMMU_REAL_PMD_MASK;
-       while(start < end) {
-               faddr = (start + (0x10000 - 0x100));
-               goto inside;
-               do {
-                       faddr -= 0x100;
-               inside:
-                       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                            "sta %%g0, [%0 + %2] %1\n\t"
-                                            "sta %%g0, [%0 + %3] %1\n\t"
-                                            "sta %%g0, [%0 + %4] %1\n\t"
-                                            "sta %%g0, [%0 + %5] %1\n\t"
-                                            "sta %%g0, [%0 + %6] %1\n\t"
-                                            "sta %%g0, [%0 + %7] %1\n\t"
-                                            "sta %%g0, [%0 + %8] %1\n\t" : :
-                                            "r" (faddr),
-                                            "i" (ASI_M_FLUSH_SEG),
-                                            "r" (a), "r" (b), "r" (c), "r" (d),
-                                            "r" (e), "r" (f), "r" (g));
-               } while (faddr != start);
-               start += SRMMU_REAL_PMD_SIZE;
-       }
-       srmmu_set_context(octx);
-       local_irq_restore(flags);
-       FLUSH_END
-}
-
-static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
-       register unsigned long a, b, c, d, e, f, g;
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long flags, line;
-       int octx;
-
-       FLUSH_BEGIN(mm)
-       flush_user_windows();
-       local_irq_save(flags);
-       octx = srmmu_get_context();
-       srmmu_set_context(mm->context);
-       a = 0x20; b = 0x40; c = 0x60;
-       d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-       page &= PAGE_MASK;
-       line = (page + PAGE_SIZE) - 0x100;
-       goto inside;
-       do {
-               line -= 0x100;
-       inside:
-                       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                            "sta %%g0, [%0 + %2] %1\n\t"
-                                            "sta %%g0, [%0 + %3] %1\n\t"
-                                            "sta %%g0, [%0 + %4] %1\n\t"
-                                            "sta %%g0, [%0 + %5] %1\n\t"
-                                            "sta %%g0, [%0 + %6] %1\n\t"
-                                            "sta %%g0, [%0 + %7] %1\n\t"
-                                            "sta %%g0, [%0 + %8] %1\n\t" : :
-                                            "r" (line),
-                                            "i" (ASI_M_FLUSH_PAGE),
-                                            "r" (a), "r" (b), "r" (c), "r" (d),
-                                            "r" (e), "r" (f), "r" (g));
-       } while(line != page);
-       srmmu_set_context(octx);
-       local_irq_restore(flags);
-       FLUSH_END
-}
-
-/* Cypress is copy-back, at least that is how we configure it. */
-static void cypress_flush_page_to_ram(unsigned long page)
-{
-       register unsigned long a, b, c, d, e, f, g;
-       unsigned long line;
-
-       a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-       page &= PAGE_MASK;
-       line = (page + PAGE_SIZE) - 0x100;
-       goto inside;
-       do {
-               line -= 0x100;
-       inside:
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-                                    "sta %%g0, [%0 + %2] %1\n\t"
-                                    "sta %%g0, [%0 + %3] %1\n\t"
-                                    "sta %%g0, [%0 + %4] %1\n\t"
-                                    "sta %%g0, [%0 + %5] %1\n\t"
-                                    "sta %%g0, [%0 + %6] %1\n\t"
-                                    "sta %%g0, [%0 + %7] %1\n\t"
-                                    "sta %%g0, [%0 + %8] %1\n\t" : :
-                                    "r" (line),
-                                    "i" (ASI_M_FLUSH_PAGE),
-                                    "r" (a), "r" (b), "r" (c), "r" (d),
-                                    "r" (e), "r" (f), "r" (g));
-       } while(line != page);
-}
-
-/* Cypress is also IO cache coherent. */
-static void cypress_flush_page_for_dma(unsigned long page)
-{
-}
-
-/* Cypress has unified L2 VIPT, from which both instructions and data
- * are stored.  It does not have an onboard icache of any sort, therefore
- * no flush is necessary.
- */
-static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
-{
-}
-
-static void cypress_flush_tlb_all(void)
-{
-       srmmu_flush_whole_tlb();
-}
-
-static void cypress_flush_tlb_mm(struct mm_struct *mm)
-{
-       FLUSH_BEGIN(mm)
-       __asm__ __volatile__(
-       "lda    [%0] %3, %%g5\n\t"
-       "sta    %2, [%0] %3\n\t"
-       "sta    %%g0, [%1] %4\n\t"
-       "sta    %%g5, [%0] %3\n"
-       : /* no outputs */
-       : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
-         "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
-       : "g5");
-       FLUSH_END
-}
-
-static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long size;
-
-       FLUSH_BEGIN(mm)
-       start &= SRMMU_PGDIR_MASK;
-       size = SRMMU_PGDIR_ALIGN(end) - start;
-       __asm__ __volatile__(
-               "lda    [%0] %5, %%g5\n\t"
-               "sta    %1, [%0] %5\n"
-               "1:\n\t"
-               "subcc  %3, %4, %3\n\t"
-               "bne    1b\n\t"
-               " sta   %%g0, [%2 + %3] %6\n\t"
-               "sta    %%g5, [%0] %5\n"
-       : /* no outputs */
-       : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
-         "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
-         "i" (ASI_M_FLUSH_PROBE)
-       : "g5", "cc");
-       FLUSH_END
-}
-
-static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-       struct mm_struct *mm = vma->vm_mm;
-
-       FLUSH_BEGIN(mm)
-       __asm__ __volatile__(
-       "lda    [%0] %3, %%g5\n\t"
-       "sta    %1, [%0] %3\n\t"
-       "sta    %%g0, [%2] %4\n\t"
-       "sta    %%g5, [%0] %3\n"
-       : /* no outputs */
-       : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
-         "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
-       : "g5");
-       FLUSH_END
-}
-
 /* viking.S */
 extern void viking_flush_cache_all(void);
 extern void viking_flush_cache_mm(struct mm_struct *mm);
@@ -986,15 +643,15 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-                       srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+                       pgd_set(__nocache_fix(pgdp), pmdp);
                }
-               pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+               pmdp = pmd_offset(__nocache_fix(pgdp), start);
                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                        ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
-                       srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+                       pmd_set(__nocache_fix(pmdp), ptep);
                }
                if (start > (0xffffffffUL - PMD_SIZE))
                        break;
@@ -1016,16 +673,16 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
-                       srmmu_pgd_set(pgdp, pmdp);
+                       pgd_set(pgdp, pmdp);
                }
-               pmdp = srmmu_pmd_offset(pgdp, start);
+               pmdp = pmd_offset(pgdp, start);
                if(srmmu_pmd_none(*pmdp)) {
                        ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
                                                             PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(ptep, 0, PTE_SIZE);
-                       srmmu_pmd_set(pmdp, ptep);
+                       pmd_set(pmdp, ptep);
                }
                if (start > (0xffffffffUL - PMD_SIZE))
                        break;
@@ -1082,16 +739,16 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-                       srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+                       pgd_set(__nocache_fix(pgdp), pmdp);
                }
-               pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+               pmdp = pmd_offset(__nocache_fix(pgdp), start);
                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                        ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
                                                             PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
-                       srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+                       pmd_set(__nocache_fix(pmdp), ptep);
                }
                if(what == 1) {
                        /*
@@ -1105,7 +762,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        start += SRMMU_REAL_PMD_SIZE;
                        continue;
                }
-               ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
+               ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
                *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
                start += PAGE_SIZE;
        }
@@ -1225,7 +882,7 @@ void __init srmmu_paging_init(void)
        srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
 #ifdef CONFIG_SMP
        /* Stop from hanging here... */
-       local_flush_tlb_all();
+       local_ops->tlb_all();
 #else
        flush_tlb_all();
 #endif
@@ -1239,8 +896,8 @@ void __init srmmu_paging_init(void)
        srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
 
        pgd = pgd_offset_k(PKMAP_BASE);
-       pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
-       pte = srmmu_pte_offset(pmd, PKMAP_BASE);
+       pmd = pmd_offset(pgd, PKMAP_BASE);
+       pte = pte_offset_kernel(pmd, PKMAP_BASE);
        pkmap_page_table = pte;
 
        flush_cache_all();
@@ -1272,7 +929,7 @@ void __init srmmu_paging_init(void)
        }
 }
 
-static void srmmu_mmu_info(struct seq_file *m)
+void mmu_info(struct seq_file *m)
 {
        seq_printf(m, 
                   "MMU type\t: %s\n"
@@ -1285,11 +942,7 @@ static void srmmu_mmu_info(struct seq_file *m)
                   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
 }
 
-static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
-{
-}
-
-static void srmmu_destroy_context(struct mm_struct *mm)
+void destroy_context(struct mm_struct *mm)
 {
 
        if(mm->context != NO_CONTEXT) {
@@ -1387,6 +1040,20 @@ static void __cpuinit poke_hypersparc(void)
        clear = srmmu_get_fstatus();
 }
 
+static const struct sparc32_cachetlb_ops hypersparc_ops = {
+       .cache_all      = hypersparc_flush_cache_all,
+       .cache_mm       = hypersparc_flush_cache_mm,
+       .cache_page     = hypersparc_flush_cache_page,
+       .cache_range    = hypersparc_flush_cache_range,
+       .tlb_all        = hypersparc_flush_tlb_all,
+       .tlb_mm         = hypersparc_flush_tlb_mm,
+       .tlb_page       = hypersparc_flush_tlb_page,
+       .tlb_range      = hypersparc_flush_tlb_range,
+       .page_to_ram    = hypersparc_flush_page_to_ram,
+       .sig_insns      = hypersparc_flush_sig_insns,
+       .page_for_dma   = hypersparc_flush_page_for_dma,
+};
+
 static void __init init_hypersparc(void)
 {
        srmmu_name = "ROSS HyperSparc";
@@ -1395,112 +1062,13 @@ static void __init init_hypersparc(void)
        init_vac_layout();
 
        is_hypersparc = 1;
-
-       BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
-
+       sparc32_cachetlb_ops = &hypersparc_ops;
 
        poke_srmmu = poke_hypersparc;
 
        hypersparc_setup_blockops();
 }
 
-static void __cpuinit poke_cypress(void)
-{
-       unsigned long mreg = srmmu_get_mmureg();
-       unsigned long faddr, tagval;
-       volatile unsigned long cypress_sucks;
-       volatile unsigned long clear;
-
-       clear = srmmu_get_faddr();
-       clear = srmmu_get_fstatus();
-
-       if (!(mreg & CYPRESS_CENABLE)) {
-               for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
-                       __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
-                                            "sta %%g0, [%0] %2\n\t" : :
-                                            "r" (faddr), "r" (0x40000),
-                                            "i" (ASI_M_DATAC_TAG));
-               }
-       } else {
-               for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
-                       __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
-                                            "=r" (tagval) :
-                                            "r" (faddr), "r" (0x40000),
-                                            "i" (ASI_M_DATAC_TAG));
-
-                       /* If modified and valid, kick it. */
-                       if((tagval & 0x60) == 0x60)
-                               cypress_sucks = *(unsigned long *)
-                                                       (0xf0020000 + faddr);
-               }
-       }
-
-       /* And one more, for our good neighbor, Mr. Broken Cypress. */
-       clear = srmmu_get_faddr();
-       clear = srmmu_get_fstatus();
-
-       mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
-       srmmu_set_mmureg(mreg);
-}
-
-static void __init init_cypress_common(void)
-{
-       init_vac_layout();
-
-       BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
-
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
-
-       poke_srmmu = poke_cypress;
-}
-
-static void __init init_cypress_604(void)
-{
-       srmmu_name = "ROSS Cypress-604(UP)";
-       srmmu_modtype = Cypress;
-       init_cypress_common();
-}
-
-static void __init init_cypress_605(unsigned long mrev)
-{
-       srmmu_name = "ROSS Cypress-605(MP)";
-       if(mrev == 0xe) {
-               srmmu_modtype = Cypress_vE;
-               hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
-       } else {
-               if(mrev == 0xd) {
-                       srmmu_modtype = Cypress_vD;
-                       hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
-               } else {
-                       srmmu_modtype = Cypress;
-               }
-       }
-       init_cypress_common();
-}
-
 static void __cpuinit poke_swift(void)
 {
        unsigned long mreg;
@@ -1524,6 +1092,20 @@ static void __cpuinit poke_swift(void)
        srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops swift_ops = {
+       .cache_all      = swift_flush_cache_all,
+       .cache_mm       = swift_flush_cache_mm,
+       .cache_page     = swift_flush_cache_page,
+       .cache_range    = swift_flush_cache_range,
+       .tlb_all        = swift_flush_tlb_all,
+       .tlb_mm         = swift_flush_tlb_mm,
+       .tlb_page       = swift_flush_tlb_page,
+       .tlb_range      = swift_flush_tlb_range,
+       .page_to_ram    = swift_flush_page_to_ram,
+       .sig_insns      = swift_flush_sig_insns,
+       .page_for_dma   = swift_flush_page_for_dma,
+};
+
 #define SWIFT_MASKID_ADDR  0x10003018
 static void __init init_swift(void)
 {
@@ -1574,23 +1156,7 @@ static void __init init_swift(void)
                break;
        }
 
-       BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-       BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = &swift_ops;
        flush_page_for_dma_global = 0;
 
        /*
@@ -1723,26 +1289,25 @@ static void __cpuinit poke_turbosparc(void)
        srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops turbosparc_ops = {
+       .cache_all      = turbosparc_flush_cache_all,
+       .cache_mm       = turbosparc_flush_cache_mm,
+       .cache_page     = turbosparc_flush_cache_page,
+       .cache_range    = turbosparc_flush_cache_range,
+       .tlb_all        = turbosparc_flush_tlb_all,
+       .tlb_mm         = turbosparc_flush_tlb_mm,
+       .tlb_page       = turbosparc_flush_tlb_page,
+       .tlb_range      = turbosparc_flush_tlb_range,
+       .page_to_ram    = turbosparc_flush_page_to_ram,
+       .sig_insns      = turbosparc_flush_sig_insns,
+       .page_for_dma   = turbosparc_flush_page_for_dma,
+};
+
 static void __init init_turbosparc(void)
 {
        srmmu_name = "Fujitsu TurboSparc";
        srmmu_modtype = TurboSparc;
-
-       BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = &turbosparc_ops;
        poke_srmmu = poke_turbosparc;
 }
 
@@ -1757,6 +1322,20 @@ static void __cpuinit poke_tsunami(void)
        srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops tsunami_ops = {
+       .cache_all      = tsunami_flush_cache_all,
+       .cache_mm       = tsunami_flush_cache_mm,
+       .cache_page     = tsunami_flush_cache_page,
+       .cache_range    = tsunami_flush_cache_range,
+       .tlb_all        = tsunami_flush_tlb_all,
+       .tlb_mm         = tsunami_flush_tlb_mm,
+       .tlb_page       = tsunami_flush_tlb_page,
+       .tlb_range      = tsunami_flush_tlb_range,
+       .page_to_ram    = tsunami_flush_page_to_ram,
+       .sig_insns      = tsunami_flush_sig_insns,
+       .page_for_dma   = tsunami_flush_page_for_dma,
+};
+
 static void __init init_tsunami(void)
 {
        /*
@@ -1767,22 +1346,7 @@ static void __init init_tsunami(void)
 
        srmmu_name = "TI Tsunami";
        srmmu_modtype = Tsunami;
-
-       BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-       BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = &tsunami_ops;
        poke_srmmu = poke_tsunami;
 
        tsunami_setup_blockops();
@@ -1793,7 +1357,7 @@ static void __cpuinit poke_viking(void)
        unsigned long mreg = srmmu_get_mmureg();
        static int smp_catch;
 
-       if(viking_mxcc_present) {
+       if (viking_mxcc_present) {
                unsigned long mxcc_control = mxcc_get_creg();
 
                mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
@@ -1830,6 +1394,52 @@ static void __cpuinit poke_viking(void)
        srmmu_set_mmureg(mreg);
 }
 
+static struct sparc32_cachetlb_ops viking_ops = {
+       .cache_all      = viking_flush_cache_all,
+       .cache_mm       = viking_flush_cache_mm,
+       .cache_page     = viking_flush_cache_page,
+       .cache_range    = viking_flush_cache_range,
+       .tlb_all        = viking_flush_tlb_all,
+       .tlb_mm         = viking_flush_tlb_mm,
+       .tlb_page       = viking_flush_tlb_page,
+       .tlb_range      = viking_flush_tlb_range,
+       .page_to_ram    = viking_flush_page_to_ram,
+       .sig_insns      = viking_flush_sig_insns,
+       .page_for_dma   = viking_flush_page_for_dma,
+};
+
+#ifdef CONFIG_SMP
+/* On sun4d the cpu broadcasts local TLB flushes, so we can just
+ * perform the local TLB flush and all the other cpus will see it.
+ * But, unfortunately, there is a bug in the sun4d XBUS backplane
+ * that requires that we add some synchronization to these flushes.
+ *
+ * The bug is that the fifo which keeps track of all the pending TLB
+ * broadcasts in the system is an entry or two too small, so if we
+ * have too many going at once we'll overflow that fifo and lose a TLB
+ * flush resulting in corruption.
+ *
+ * Our workaround is to take a global spinlock around the TLB flushes,
+ * which guarantees we won't ever have too many pending.  It's a big
+ * hammer, but a semaphore like system to make sure we only have N TLB
+ * flushes going at once will require SMP locking anyways so there's
+ * no real value in trying any harder than this.
+ */
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
+       .cache_all      = viking_flush_cache_all,
+       .cache_mm       = viking_flush_cache_mm,
+       .cache_page     = viking_flush_cache_page,
+       .cache_range    = viking_flush_cache_range,
+       .tlb_all        = sun4dsmp_flush_tlb_all,
+       .tlb_mm         = sun4dsmp_flush_tlb_mm,
+       .tlb_page       = sun4dsmp_flush_tlb_page,
+       .tlb_range      = sun4dsmp_flush_tlb_range,
+       .page_to_ram    = viking_flush_page_to_ram,
+       .sig_insns      = viking_flush_sig_insns,
+       .page_for_dma   = viking_flush_page_for_dma,
+};
+#endif
+
 static void __init init_viking(void)
 {
        unsigned long mreg = srmmu_get_mmureg();
@@ -1847,76 +1457,101 @@ static void __init init_viking(void)
                 * This is only necessary because of the new way in
                 * which we use the IOMMU.
                 */
-               BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
-
+               viking_ops.page_for_dma = viking_flush_page;
+#ifdef CONFIG_SMP
+               viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
+#endif
                flush_page_for_dma_global = 0;
        } else {
                srmmu_name = "TI Viking/MXCC";
                viking_mxcc_present = 1;
-
                srmmu_cache_pagetables = 1;
-
-               /* MXCC vikings lack the DMA snooping bug. */
-               BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
        }
 
-       BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
-
+       sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+               &viking_ops;
 #ifdef CONFIG_SMP
-       if (sparc_cpu_model == sun4d) {
-               BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
-       } else
+       if (sparc_cpu_model == sun4d)
+               sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+                       &viking_sun4d_smp_ops;
 #endif
-       {
-               BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
-       }
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
 
        poke_srmmu = poke_viking;
 }
 
 #ifdef CONFIG_SPARC_LEON
+static void leon_flush_cache_mm(struct mm_struct *mm)
+{
+       leon_flush_cache_all();
+}
 
-void __init poke_leonsparc(void)
+static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 {
+       leon_flush_pcache_all(vma, page);
 }
 
-void __init init_leon(void)
+static void leon_flush_cache_range(struct vm_area_struct *vma,
+                                  unsigned long start,
+                                  unsigned long end)
 {
+       leon_flush_cache_all();
+}
 
-       srmmu_name = "LEON";
+static void leon_flush_tlb_mm(struct mm_struct *mm)
+{
+       leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_page(struct vm_area_struct *vma,
+                               unsigned long page)
+{
+       leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_range(struct vm_area_struct *vma,
+                                unsigned long start,
+                                unsigned long end)
+{
+       leon_flush_tlb_all();
+}
+
+static void leon_flush_page_to_ram(unsigned long page)
+{
+       leon_flush_cache_all();
+}
+
+static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
+{
+       leon_flush_cache_all();
+}
+
+static void leon_flush_page_for_dma(unsigned long page)
+{
+       leon_flush_dcache_all();
+}
+
+void __init poke_leonsparc(void)
+{
+}
 
-       BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
-                       BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
-                       BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
-                       BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
+static const struct sparc32_cachetlb_ops leon_ops = {
+       .cache_all      = leon_flush_cache_all,
+       .cache_mm       = leon_flush_cache_mm,
+       .cache_page     = leon_flush_cache_page,
+       .cache_range    = leon_flush_cache_range,
+       .tlb_all        = leon_flush_tlb_all,
+       .tlb_mm         = leon_flush_tlb_mm,
+       .tlb_page       = leon_flush_tlb_page,
+       .tlb_range      = leon_flush_tlb_range,
+       .page_to_ram    = leon_flush_page_to_ram,
+       .sig_insns      = leon_flush_sig_insns,
+       .page_for_dma   = leon_flush_page_for_dma,
+};
 
+void __init init_leon(void)
+{
+       srmmu_name = "LEON";
+       sparc32_cachetlb_ops = &leon_ops;
        poke_srmmu = poke_leonsparc;
 
        srmmu_cache_pagetables = 0;
@@ -1955,22 +1590,15 @@ static void __init get_srmmu_type(void)
                        break;
                case 0:
                case 2:
-                       /* Uniprocessor Cypress */
-                       init_cypress_604();
-                       break;
                case 10:
                case 11:
                case 12:
-                       /* _REALLY OLD_ Cypress MP chips... */
                case 13:
                case 14:
                case 15:
-                       /* MP Cypress mmu/cache-controller */
-                       init_cypress_605(mod_rev);
-                       break;
                default:
-                       /* Some other Cypress revision, assume a 605. */
-                       init_cypress_605(mod_rev);
+                       prom_printf("Sparc-Linux Cypress support no longer exists.\n");
+                       prom_halt();
                        break;
                }
                return;
@@ -2026,20 +1654,156 @@ static void __init get_srmmu_type(void)
        srmmu_is_bad();
 }
 
-extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
-       tsetup_mmu_patchme, rtrap_mmu_patchme;
-
-extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
-       tsetup_srmmu_stackchk, srmmu_rett_stackchk;
-
 #ifdef CONFIG_SMP
 /* Local cross-calls. */
 static void smp_flush_page_for_dma(unsigned long page)
 {
-       xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
-       local_flush_page_for_dma(page);
+       xc1((smpfunc_t) local_ops->page_for_dma, page);
+       local_ops->page_for_dma(page);
+}
+
+static void smp_flush_cache_all(void)
+{
+       xc0((smpfunc_t) local_ops->cache_all);
+       local_ops->cache_all();
+}
+
+static void smp_flush_tlb_all(void)
+{
+       xc0((smpfunc_t) local_ops->tlb_all);
+       local_ops->tlb_all();
 }
 
+static void smp_flush_cache_mm(struct mm_struct *mm)
+{
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+               local_ops->cache_mm(mm);
+       }
+}
+
+static void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask)) {
+                       xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+                       if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
+                               cpumask_copy(mm_cpumask(mm),
+                                            cpumask_of(smp_processor_id()));
+               }
+               local_ops->tlb_mm(mm);
+       }
+}
+
+static void smp_flush_cache_range(struct vm_area_struct *vma,
+                                 unsigned long start,
+                                 unsigned long end)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc3((smpfunc_t) local_ops->cache_range,
+                           (unsigned long) vma, start, end);
+               local_ops->cache_range(vma, start, end);
+       }
+}
+
+static void smp_flush_tlb_range(struct vm_area_struct *vma,
+                               unsigned long start,
+                               unsigned long end)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc3((smpfunc_t) local_ops->tlb_range,
+                           (unsigned long) vma, start, end);
+               local_ops->tlb_range(vma, start, end);
+       }
+}
+
+static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc2((smpfunc_t) local_ops->cache_page,
+                           (unsigned long) vma, page);
+               local_ops->cache_page(vma, page);
+       }
+}
+
+static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       if (mm->context != NO_CONTEXT) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
+                       xc2((smpfunc_t) local_ops->tlb_page,
+                           (unsigned long) vma, page);
+               local_ops->tlb_page(vma, page);
+       }
+}
+
+static void smp_flush_page_to_ram(unsigned long page)
+{
+       /* Current theory is that those who call this are the ones
+        * who have just dirtied their cache with the pages contents
+        * in kernel space, therefore we only run this on local cpu.
+        *
+        * XXX This experiment failed, research further... -DaveM
+        */
+#if 1
+       xc1((smpfunc_t) local_ops->page_to_ram, page);
+#endif
+       local_ops->page_to_ram(page);
+}
+
+static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+       cpumask_t cpu_mask;
+       cpumask_copy(&cpu_mask, mm_cpumask(mm));
+       cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+       if (!cpumask_empty(&cpu_mask))
+               xc2((smpfunc_t) local_ops->sig_insns,
+                   (unsigned long) mm, insn_addr);
+       local_ops->sig_insns(mm, insn_addr);
+}
+
+static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
+       .cache_all      = smp_flush_cache_all,
+       .cache_mm       = smp_flush_cache_mm,
+       .cache_page     = smp_flush_cache_page,
+       .cache_range    = smp_flush_cache_range,
+       .tlb_all        = smp_flush_tlb_all,
+       .tlb_mm         = smp_flush_tlb_mm,
+       .tlb_page       = smp_flush_tlb_page,
+       .tlb_range      = smp_flush_tlb_range,
+       .page_to_ram    = smp_flush_page_to_ram,
+       .sig_insns      = smp_flush_sig_insns,
+       .page_for_dma   = smp_flush_page_for_dma,
+};
 #endif
 
 /* Load up routines and constants for sun4m and sun4d mmu */
@@ -2047,93 +1811,36 @@ void __init load_mmu(void)
 {
        extern void ld_mmu_iommu(void);
        extern void ld_mmu_iounit(void);
-       extern void ___xchg32_sun4md(void);
 
        /* Functions */
-#ifndef CONFIG_SMP     
-       BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
-#endif
-
-       BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
-       
-       BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
-       BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
-       BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
-       BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
-       BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
-       BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
-       BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
-       BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
-       BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
-       BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
-
-       BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
-
        get_srmmu_type();
 
 #ifdef CONFIG_SMP
        /* El switcheroo... */
+       local_ops = sparc32_cachetlb_ops;
 
-       BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
-       BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
-       BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
-       BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
-       BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
-       BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
-       BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
-       BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
-       BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
-       BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
-       BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
-
-       BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
-       if (sparc_cpu_model != sun4d &&
-           sparc_cpu_model != sparc_leon) {
-               BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
-               BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
+       if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
+               smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
+               smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
+               smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
+               smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
        }
-       BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
 
        if (poke_srmmu == poke_viking) {
                /* Avoid unnecessary cross calls. */
-               BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
-               BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
-               BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
-               BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
-               BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
-               BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
-               BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
+               smp_cachetlb_ops.cache_all = local_ops->cache_all;
+               smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
+               smp_cachetlb_ops.cache_range = local_ops->cache_range;
+               smp_cachetlb_ops.cache_page = local_ops->cache_page;
+
+               smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
+               smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
+               smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
        }
+
+       /* It really is const after this point. */
+       sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+               &smp_cachetlb_ops;
 #endif
 
        if (sparc_cpu_model == sun4d)
@@ -2148,5 +1855,4 @@ void __init load_mmu(void)
        else
                sun4m_init_smp();
 #endif
-       btfixup();
 }