/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)

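/*
 * kmap_coherent() temporarily maps @page at a fixmap slot whose virtual
 * address has the same cache colour (the low bits covered by the D-cache
 * alias mask) as the user address @addr.  The SH-4 D-cache is virtually
 * indexed, so only a same-coloured kernel mapping is guaranteed to hit
 * the cache lines that already hold the user mapping's data.  With, say,
 * a 16kB cache way and 4kB pages, the mask covers two address bits,
 * giving four possible colours.
 */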
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	/* Pick the fixmap slot that matches the colour of @addr. */
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	/* Evict any stale translation for the slot before reusing it. */
	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	return (void *)vaddr;
}

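/*
 * kunmap_coherent() only drops the preemption reference taken by
 * kmap_coherent(); the fixmap TLB entry is left in place and is flushed
 * the next time the slot is reused.  The @page argument is unused.
 */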
static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);

	/*
	 * If the kernel P1 address and the user address share a colour,
	 * a plain clear_page() is already cache-coherent; otherwise go
	 * through a same-coloured coherent mapping.
	 */
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		void *vto = kmap_coherent(page, address);
		__clear_user_page(vto, to);
		kunmap_coherent(vto);
	}
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		void *vfrom = kmap_coherent(page, address);
		__copy_user_page(vfrom, from, to);
		kunmap_coherent(vfrom);
	}
}
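
/*
 * PG_mapped lifecycle: clear_user_page() and copy_user_page() above set
 * the bit whenever a page is touched on behalf of a user mapping;
 * ptep_get_and_clear() below drops it again once the page no longer has
 * any writably shared mapping left.
 */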

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}