ARM: ZERO_PAGE: Avoid flush_dcache_page() for zero page
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

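/*
 * Map the page at 'pfn' into a temporary kernel virtual address with
 * the same cache colour as the user address 'vaddr', then clean and
 * invalidate that alias so a VIPT cache writes back the correct lines.
 */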
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
	__flush_icache_all();
}

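/*
 * Flush the caches for an entire address space.  VIVT caches hand off
 * to the generic VIVT implementation; aliasing VIPT caches take a full
 * D-cache clean+invalidate, drain the write buffer and flush the
 * I-cache.
 */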
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
		__flush_icache_all();
	}
}

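/*
 * Flush the caches for the user range 'start'..'end' within 'vma'.
 * On aliasing VIPT hardware the whole D-cache is cleaned and
 * invalidated rather than chasing every alias of the range.
 */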
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
		__flush_icache_all();
	}
}

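/*
 * Flush one user page.  VIVT caches flush by user virtual address;
 * aliasing VIPT caches flush through a kernel alias of the matching
 * colour; non-aliasing VIPT caches need nothing here.
 */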
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}

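/*
 * Make the kernel's write to 'page' (on behalf of ptrace or a similar
 * access via 'kaddr') visible to the user mapping at 'uaddr'.  On
 * non-aliasing VIPT caches only the kernel mapping needs flushing,
 * and only when the target VMA is executable.
 */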
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

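/*
 * Write back the kernel mapping of 'page' and, on an aliasing VIPT
 * cache, the single user colour the page cache maps it at.
 */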
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	void *addr = page_address(page);

	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (addr)
#endif
		__cpuc_flush_dcache_page(addr);

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

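/*
 * Walk every shared user mapping of 'page' in the current mm and flush
 * each alias via flush_cache_page().
 */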
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_page(page_address(page));
}