Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/sh/mm/pg-sh4.c | |
3 | * | |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | |
8cf1a743 | 5 | * Copyright (C) 2002 - 2007 Paul Mundt |
1da177e4 LT |
6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | |
8 | */ | |
1da177e4 | 9 | #include <linux/mm.h> |
52e27782 | 10 | #include <linux/mutex.h> |
e06c4e57 | 11 | #include <linux/fs.h> |
7747b9a4 PM |
12 | #include <linux/highmem.h> |
13 | #include <linux/module.h> | |
1da177e4 LT |
14 | #include <asm/mmu_context.h> |
15 | #include <asm/cacheflush.h> | |
16 | ||
11c19656 | 17 | #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) |
8b395265 | 18 | |
/*
 * Map @page at a fixmap kernel virtual address whose D-cache colour
 * matches the user address @addr, so that accesses through the
 * returned mapping hit the same cache alias lines as the user
 * mapping does.
 *
 * Takes a preempt reference (inc_preempt_count()); the caller must
 * balance it with kunmap_coherent().
 *
 * Returns the colour-matched kernel virtual address for @page.
 */
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	/* Pin this CPU: the fixmap slot chosen below is per-CPU-unsafe
	 * to migrate away from. */
	inc_preempt_count();

	/* Pick the fixmap slot with the same cache colour as @addr. */
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	/* Evict any stale TLB entry for the slot before re-using it;
	 * irqs are masked so the flush and subsequent reload can't race
	 * with an interrupt touching the same slot. */
	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	/* Preload the new translation (NULL vma: kernel mapping). */
	update_mmu_cache(NULL, vaddr, pte);

	return (void *)vaddr;
}
39 | ||
/*
 * Tear down a kmap_coherent() mapping.  The fixmap slot itself is
 * simply left behind for the next user (kmap_coherent() flushes the
 * stale TLB entry); only the preempt reference is dropped here, so
 * @page is unused.
 */
static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}
45 | ||
1da177e4 LT |
46 | /* |
47 | * clear_user_page | |
48 | * @to: P1 address | |
49 | * @address: U0 address to be mapped | |
50 | * @page: page (virt_to_page(to)) | |
51 | */ | |
52 | void clear_user_page(void *to, unsigned long address, struct page *page) | |
53 | { | |
39e688a9 | 54 | __set_bit(PG_mapped, &page->flags); |
1da177e4 LT |
55 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) |
56 | clear_page(to); | |
57 | else { | |
8cf1a743 PM |
58 | void *vto = kmap_coherent(page, address); |
59 | __clear_user_page(vto, to); | |
60 | kunmap_coherent(vto); | |
1da177e4 LT |
61 | } |
62 | } | |
63 | ||
64 | /* | |
65 | * copy_user_page | |
66 | * @to: P1 address | |
67 | * @from: P1 address | |
68 | * @address: U0 address to be mapped | |
69 | * @page: page (virt_to_page(to)) | |
70 | */ | |
8b395265 | 71 | void copy_user_page(void *to, void *from, unsigned long address, |
1da177e4 LT |
72 | struct page *page) |
73 | { | |
39e688a9 | 74 | __set_bit(PG_mapped, &page->flags); |
1da177e4 LT |
75 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) |
76 | copy_page(to, from); | |
77 | else { | |
8cf1a743 PM |
78 | void *vfrom = kmap_coherent(page, address); |
79 | __copy_user_page(vfrom, from, to); | |
80 | kunmap_coherent(vfrom); | |
1da177e4 LT |
81 | } |
82 | } | |
39e688a9 | 83 | |
/*
 * copy_user_highpage - copy a (possibly highmem) page for user space
 * @to: destination page
 * @from: source page
 * @vaddr: user virtual address @to will be mapped at
 * @vma: vma the copy is for (unused here)
 *
 * The source is read through a kmap_coherent() mapping whose cache
 * colour matches @vaddr, so the copy sees the user's dirty cache
 * lines.  If the destination's kernel mapping aliases with @vaddr,
 * it is written back afterwards so the data is visible at the user
 * colour.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	/* Mark the destination as having a colour-aware mapping. */
	__set_bit(PG_mapped, &to->flags);

	vto = kmap_atomic(to, KM_USER1);
	vfrom = kmap_coherent(from, vaddr);
	copy_page(vto, vfrom);
	kunmap_coherent(vfrom);

	/* Kernel mapping of @to aliases the user colour: write back so
	 * the user mapping sees the copied data. */
	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
104 | ||
39e688a9 PM |
105 | /* |
106 | * For SH-4, we have our own implementation for ptep_get_and_clear | |
107 | */ | |
108 | inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | |
109 | { | |
110 | pte_t pte = *ptep; | |
111 | ||
112 | pte_clear(mm, addr, ptep); | |
113 | if (!pte_not_present(pte)) { | |
114 | unsigned long pfn = pte_pfn(pte); | |
115 | if (pfn_valid(pfn)) { | |
116 | struct page *page = pfn_to_page(pfn); | |
117 | struct address_space *mapping = page_mapping(page); | |
118 | if (!mapping || !mapping_writably_mapped(mapping)) | |
119 | __clear_bit(PG_mapped, &page->flags); | |
120 | } | |
121 | } | |
122 | return pte; | |
123 | } |