mm/hugetlb: add more arch-defined huge_pte functions
[linux-2.6-block.git] / arch/tile/include/asm/hugetlb.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_HUGETLB_H
#define _ASM_TILE_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>
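/*
 * <asm-generic/hugetlb.h> supplies the generic huge_pte_* helpers
 * (such as mk_huge_pte(), huge_pte_write() and huge_pte_dirty())
 * that this architecture does not override below.
 */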


static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#ifdef CONFIG_HUGETLB_SUPER_PAGES
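/*
 * With CONFIG_HUGETLB_SUPER_PAGES, huge mappings may use sizes other
 * than the natural PMD/PUD leaf sizes; such entries are marked with
 * the "super" PTE bit via pte_mksuper() below.
 */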
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));
	if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
		entry = pte_mksuper(entry);
	return entry;
}
#define arch_make_huge_pte arch_make_huge_pte

/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
enum {
	HUGE_SHIFT_PGDIR = 0,
	HUGE_SHIFT_PMD = 1,
	HUGE_SHIFT_PAGE = 2,
	HUGE_SHIFT_ENTRIES
};
extern int huge_shift[HUGE_SHIFT_ENTRIES];
#endif
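/*
 * Illustrative sketch of how the generic hugetlb code is expected to
 * combine the hooks above when installing a huge mapping; the exact
 * caller lives in the generic mm code and is assumed here, not quoted:
 *
 *	pte_t entry = mk_huge_pte(page, vma->vm_page_prot);
 *	entry = arch_make_huge_pte(entry, vma, page, writable);
 *	set_huge_pte_at(mm, address, ptep, entry);
 */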

#endif /* _ASM_TILE_HUGETLB_H */