/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 | #include <linux/init.h> | |
11 | #include <linux/spinlock.h> | |
12 | #include <linux/mm.h> | |
13 | ||
14 | #include <asm/page.h> | |
15 | #include <asm/pgtable.h> | |
16 | #include <asm/shmparam.h> | |
17 | #include <asm/tlbflush.h> | |
18 | #include <asm/cacheflush.h> | |
19 | ||
20 | #if SHMLBA > 16384 | |
21 | #error FIX ME | |
22 | #endif | |
23 | ||
24 | #define from_address (0xffff8000) | |
25 | #define from_pgprot PAGE_KERNEL | |
26 | #define to_address (0xffffc000) | |
27 | #define to_pgprot PAGE_KERNEL | |
28 | ||
08ee4e4c RK |
29 | #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) |
30 | ||
1da177e4 LT |
31 | static DEFINE_SPINLOCK(v6_lock); |
32 | ||
/*
 * Cache colour of a virtual address: the SHMLBA-sized alias slot the
 * address falls in, expressed as a page index.  The argument is fully
 * parenthesised so expressions such as DCACHE_COLOUR(base + off)
 * expand with the intended precedence ('+' binds looser than '&').
 */
#define DCACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
34 | ||
/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 *
 * kto/kfrom are kernel-mapping addresses of the destination/source
 * pages; vaddr (the user virtual address) is unused here because a
 * non-aliasing cache needs no colour matching.
 */
void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	copy_page(kto, kfrom);
}
43 | ||
/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 *
 * kaddr is the kernel-mapping address of the page; vaddr (the user
 * virtual address) is unused here because a non-aliasing cache needs
 * no colour matching.
 */
void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
	clear_page(kaddr);
}
52 | ||
/*
 * Copy the page, taking account of the cache colour.
 *
 * kto/kfrom are kernel-mapping addresses of the destination/source
 * pages; vaddr is the user virtual address the destination will appear
 * at, which fixes the cache colour the copy must be performed through.
 */
void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	unsigned int offset = DCACHE_COLOUR(vaddr);
	unsigned long from, to;

	/*
	 * Discard data in the kernel mapping for the new page.
	 * FIXME: needs this MCRR to be supported.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");

	/*
	 * Now copy the page using the same cache colour as the
	 * pages ultimate destination.
	 *
	 * v6_lock serialises use of the fixed from_address/to_address
	 * scratch windows, which are shared by every caller of the
	 * aliasing copy/clear routines.
	 */
	spin_lock(&v6_lock);

	/* Map source and destination into the colour-matched alias slots. */
	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));

	from = from_address + (offset << PAGE_SHIFT);
	to = to_address + (offset << PAGE_SHIFT);

	/*
	 * Evict any stale TLB entries left by a previous user of these
	 * window slots before touching them through the new mappings.
	 */
	flush_tlb_kernel_page(from);
	flush_tlb_kernel_page(to);

	copy_page((void *)to, (void *)from);

	spin_unlock(&v6_lock);
}
90 | ||
/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 *
 * kaddr is the kernel-mapping address of the page; vaddr is the user
 * virtual address it will appear at, which fixes the cache colour.
 */
void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
	unsigned int offset = DCACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/*
	 * Discard data in the kernel mapping for the new page
	 * FIXME: needs this MCRR to be supported.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kaddr),
	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");

	/*
	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.
	 *
	 * v6_lock serialises use of the fixed to_address scratch
	 * window shared with the aliasing copy routine above.
	 */
	spin_lock(&v6_lock);

	/* Map the page into the colour-matched alias slot. */
	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
	/* Evict any stale TLB entry left by a previous user of the slot. */
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}
123 | ||
/*
 * Default (non-aliasing) user-page operations for ARMv6.  Marked
 * __initdata, so presumably its contents are copied into the live
 * cpu_user structure during boot before this template is discarded —
 * TODO confirm against the processor setup code.  v6_userpage_init()
 * below overrides cpu_user with the aliasing variants when required.
 */
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
};
128 | ||
129 | static int __init v6_userpage_init(void) | |
130 | { | |
131 | if (cache_is_vipt_aliasing()) { | |
1da177e4 LT |
132 | cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing; |
133 | cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing; | |
134 | } | |
135 | ||
136 | return 0; | |
137 | } | |
138 | ||
08ee4e4c | 139 | core_initcall(v6_userpage_init); |