/*
 * arch/powerpc/include/asm/mmu_context.h
 *
 * From: powerpc/mm: Move around mmu_gathers definition on 64-bit
 * (linux-2.6-block.git)
 */
1#ifndef __ASM_POWERPC_MMU_CONTEXT_H
2#define __ASM_POWERPC_MMU_CONTEXT_H
88ced031 3#ifdef __KERNEL__
047ea784 4
5e696617
BH
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <linux/spinlock.h>
80a7cc6c
KG
9#include <asm/mmu.h>
10#include <asm/cputable.h>
11#include <asm-generic/mm_hooks.h>
5e696617 12#include <asm/cputhreads.h>
80a7cc6c
KG
13
/*
 * Most of the context management is out of line
 */
1da177e4
LT
17extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
18extern void destroy_context(struct mm_struct *mm);
19
5e696617 20extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1da177e4
LT
21extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
22extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
5e696617 23extern void set_context(unsigned long id, pgd_t *pgd);
1da177e4 24
/*
 * Boot-time MMU context setup.  Server 64-bit (Book3S) needs no
 * initialization, so it gets an empty inline stub; every other
 * sub-architecture provides a real out-of-line implementation.
 */
#ifndef CONFIG_PPC_BOOK3S_64
extern void mmu_context_init(void);
#else
static inline void mmu_context_init(void) { }
#endif

1da177e4
LT
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Record that the incoming context has run on this CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* On 32-bit the current PGDIR lives in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* Same context?  Then there is no HW state to change */
	if (prev == next)
		return;

	/* Any in-flight AltiVec data streams must be stopped before
	 * the HW context is switched out from under them
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* How the HW switch happens depends on the sub-architecture:
	 * server 64-bit picks SLB or STAB based on CPU features,
	 * everything else goes through the common out-of-line path.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif

}
72
73#define deactivate_mm(tsk,mm) do { } while (0)
74
75/*
76 * After we have set current->mm to a new value, this activates
77 * the context for the new mm so we see the new mappings.
78 */
79static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
80{
81 unsigned long flags;
82
83 local_irq_save(flags);
84 switch_mm(prev, next, current);
85 local_irq_restore(flags);
86}
87
5e696617
BH
/* Lazy TLB switching is not used on powerpc; intentionally a no-op. */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
93
88ced031 94#endif /* __KERNEL__ */
047ea784 95#endif /* __ASM_POWERPC_MMU_CONTEXT_H */