#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	/* Valid slots are 0..KM_TYPE_NR-1; idx == KM_TYPE_NR is already overflow. */
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx_pop(void)
{
	int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(idx < 0);
#endif
	return idx;
}

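/*
 * Illustrative sketch (not actual arch code): an architecture's
 * kmap_atomic() implementation typically pairs these helpers to pick a
 * per-CPU fixmap slot, roughly:
 *
 *	type  = kmap_atomic_idx_push();
 *	idx   = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	... install the pte for the page at vaddr ...
 *
 * and calls kmap_atomic_idx_pop() on the unmap side.  The fixmap names
 * above are taken from x86; other architectures differ.
 */
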
#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
/*
 * Without HIGHMEM every page is permanently mapped, so kmap() reduces to
 * page_address() and the atomic variants only toggle pagefaults.
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

/*
 * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

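/*
 * Illustrative only: while callers are being converted away from explicit
 * KM_* slots, both forms below map the same page; the extra km_type
 * argument is simply ignored.
 *
 *	addr = kmap_atomic(page, KM_USER0);	old two-argument form
 *	addr = kmap_atomic(page);		new form
 */
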
/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)

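/*
 * Illustrative only: the BUILD_BUG_ON() above turns the classic misuse
 * into a compile-time error:
 *
 *	addr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(page);	does not build: expects the mapped address
 *	kunmap_atomic(addr);	correct
 */
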
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or to be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

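/*
 * Illustrative sketch: an anonymous-fault path allocates its zeroed page
 * roughly like this (cf. do_anonymous_page() in mm/memory.c); error
 * handling is elided and the locals are hypothetical:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */
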
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

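/*
 * Illustrative only: a filesystem zeroing the tail of a partially written
 * page might do (offset is a hypothetical variable):
 *
 *	zero_user(page, offset, PAGE_SIZE - offset);
 */
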
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

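/*
 * Illustrative sketch: copy-on-write fault handling copies the shared page
 * into a freshly allocated one roughly like this (cf. cow_user_page() in
 * mm/memory.c); new_page and old_page are hypothetical locals:
 *
 *	copy_user_highpage(new_page, old_page, address, vma);
 */
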
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif /* _LINUX_HIGHMEM_H */