/*
 * Low level TLB handling.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
#include <linux/mm_types.h>

#include <asm/mmu_context.h>
#include <arch/hwregs/asm/mmu_defs_asm.h>
#include <arch/hwregs/supp_reg.h>
#define UPDATE_TLB_SEL_IDX(val)					\
do {								\
	unsigned long tlb_sel;					\
								\
	tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val);	\
	SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel);			\
} while (0)

#define UPDATE_TLB_HILO(tlb_hi, tlb_lo)		\
do {						\
	SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi);	\
	SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo);	\
} while (0)
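
/*
 * Minimal illustrative sketch (not part of the original file; the helper name
 * is hypothetical and nothing below uses it): this is how the two macros above
 * combine in the flush routines to overwrite a single TLB entry with an
 * invalid mapping, assuming SUPP_BANK_SEL() has already selected an MMU.
 */
static inline void
__example_invalidate_tlb_entry(int idx, unsigned long invalid_tlb_hi)
{
	UPDATE_TLB_SEL_IDX(idx);		/* Point the TLB index register at the entry. */
	UPDATE_TLB_HILO(invalid_tlb_hi, 0);	/* Write an entry that can never match. */
}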
/*
 * The TLB can host up to 256 different mm contexts at the same time. The
 * running context is found in the PID register. Each TLB entry contains a
 * page_id that has to match the PID register to give a hit. page_id_map keeps
 * track of which mm is assigned to which page_id, making sure it's known when
 * to invalidate TLB entries.
 *
 * The last page_id is never running; it is used as an invalid page_id so that
 * it's possible to make TLB entries that will never match.
 *
 * Note: the flushes need to be atomic, otherwise an interrupt handler that
 * uses vmalloc'ed memory might cause a TLB load in the middle of a flush.
 */
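
/*
 * Layout note, as relied on by the flush routines below: the low eight bits
 * of rw_mm_tlb_hi hold an entry's pid (page_id) and the page-aligned upper
 * bits hold its virtual page number, so (tlb_hi & 0xff) extracts the page_id
 * of an entry and (tlb_hi & PAGE_MASK) its virtual address.
 */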
/* Flush all TLB entries. */
void
__flush_tlb_all(void)
{
	int i;
	int mmu;
	unsigned long flags;
	unsigned long mmu_tlb_hi;
	unsigned long mmu_tlb_sel;

	/*
	 * Mask with 0xf so similar TLB entries aren't written in the same
	 * 4-way entry group.
	 */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu); /* Select the MMU */
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			/* Store an invalid entry. */
			mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);
			mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
				    | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));

			SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel);
			SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi);
			SUPP_REG_WR(RW_MM_TLB_LO, 0);
		}
	}

	local_irq_restore(flags);
}
/* Flush an entire user address space. */
void
__flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int mmu;
	unsigned long flags;
	unsigned long page_id;
	unsigned long tlb_hi;
	unsigned long mmu_tlb_hi;

	page_id = mm->context.page_id;

	if (page_id == NO_CONTEXT)
		return;

	/* Mark the TLB entries that match the page_id as invalid. */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu);
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			UPDATE_TLB_SEL_IDX(i);

			/* Get the page_id. */
			SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);
			/* Check if the page_id matches. */
			if ((tlb_hi & 0xff) == page_id) {
				mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid,
							INVALID_PAGEID)
					    | REG_FIELD(mmu, rw_mm_tlb_hi, vpn,
							i & 0xf));
				UPDATE_TLB_HILO(mmu_tlb_hi, 0);
			}
		}
	}

	local_irq_restore(flags);
}
/* Invalidate a single page. */
void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int i;
	int mmu;
	unsigned long page_id;
	unsigned long flags;
	unsigned long tlb_hi;
	unsigned long mmu_tlb_hi;

	page_id = vma->vm_mm->context.page_id;

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK;

	/*
	 * Invalidate those TLB entries that match both the mm context and the
	 * requested virtual address.
	 */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu);
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			UPDATE_TLB_SEL_IDX(i);
			SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);

			/* Check if both the page_id and the address match. */
			if (((tlb_hi & 0xff) == page_id) &&
			    ((tlb_hi & PAGE_MASK) == addr)) {
				mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid,
						       INVALID_PAGEID) | addr;
				UPDATE_TLB_HILO(mmu_tlb_hi, 0);
			}
		}
	}

	local_irq_restore(flags);
}
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}

static DEFINE_SPINLOCK(mmu_context_lock);
/* Called in schedule() just before actually doing the switch_to. */
void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next) {
		int cpu = smp_processor_id();

		/* Make sure there is an MMU context. */
		spin_lock(&mmu_context_lock);
		get_mmu_context(next);
		cpumask_set_cpu(cpu, mm_cpumask(next));
		spin_unlock(&mmu_context_lock);

		/*
		 * Remember the pgd for the fault handlers. Keep a separate
		 * copy of it because current and active_mm might be invalid
		 * at points where there's still a need to dereference the pgd.
		 */
		per_cpu(current_pgd, cpu) = next->pgd;

		/* Switch context in the MMU. */
		if (tsk && task_thread_info(tsk)) {
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
					task_thread_info(tsk)->tls);
		} else {
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
		}
	}
}