/*
 * include/linux/hugetlb.h
 *
 * From linux-2.6-block.git, as of the commit "Create the ZONE_MOVABLE
 * zone" (git-blame annotations from the original web export removed).
 */
1#ifndef _LINUX_HUGETLB_H
2#define _LINUX_HUGETLB_H
3
4#ifdef CONFIG_HUGETLB_PAGE
5
6#include <linux/mempolicy.h>
516dffdc 7#include <linux/shm.h>
63551ae0 8#include <asm/tlbflush.h>
1da177e4
LT
9
10struct ctl_table;
11
12static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
13{
14 return vma->vm_flags & VM_HUGETLB;
15}
16
17int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
18int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
19int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
1da177e4 20void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
502717f4 21void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
1da177e4
LT
22int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
23int hugetlb_report_meminfo(char *);
24int hugetlb_report_node_meminfo(int, char *);
1da177e4 25unsigned long hugetlb_total_pages(void);
ac9b9c66
HD
26int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
27 unsigned long address, int write_access);
a43a8c39
CK
28int hugetlb_reserve_pages(struct inode *inode, long from, long to);
29void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
1da177e4
LT
30
31extern unsigned long max_huge_pages;
32extern const unsigned long hugetlb_zero, hugetlb_infinity;
33extern int sysctl_hugetlb_shm_group;
34
63551ae0
DG
35/* arch callbacks */
36
37pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
38pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
39dde65c 39int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
63551ae0
DG
40struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
41 int write);
42struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
43 pmd_t *pmd, int write);
63551ae0 44int pmd_huge(pmd_t pmd);
8f860591
ZY
45void hugetlb_change_protection(struct vm_area_struct *vma,
46 unsigned long address, unsigned long end, pgprot_t newprot);
63551ae0 47
1da177e4
LT
48#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
49#define is_hugepage_only_range(mm, addr, len) 0
9da61aef
DG
50#endif
51
52#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
53#define hugetlb_free_pgd_range free_pgd_range
3915bcf3
DG
54#else
55void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
56 unsigned long end, unsigned long floor,
57 unsigned long ceiling);
1da177e4
LT
58#endif
59
60#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
42b88bef
DG
61/*
62 * If the arch doesn't supply something else, assume that hugepage
63 * size aligned regions are ok without further preparation.
64 */
68589bc3
HD
65static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
66 pgoff_t pgoff)
42b88bef 67{
68589bc3
HD
68 if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
69 return -EINVAL;
42b88bef
DG
70 if (len & ~HPAGE_MASK)
71 return -EINVAL;
72 if (addr & ~HPAGE_MASK)
73 return -EINVAL;
74 return 0;
75}
1da177e4 76#else
68589bc3
HD
77int prepare_hugepage_range(unsigned long addr, unsigned long len,
78 pgoff_t pgoff);
1da177e4
LT
79#endif
80
63551ae0
DG
81#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
82#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte)
83#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
84#else
85void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
86 pte_t *ptep, pte_t pte);
87pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
88 pte_t *ptep);
89#endif
90
91#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
92#define hugetlb_prefault_arch_hook(mm) do { } while (0)
93#else
94void hugetlb_prefault_arch_hook(struct mm_struct *mm);
95#endif
96
1da177e4
LT
97#else /* !CONFIG_HUGETLB_PAGE */
98
/* CONFIG_HUGETLB_PAGE is not set: no VMA can ever be a hugetlb mapping. */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}
/* Stub: with CONFIG_HUGETLB_PAGE off there are never any huge pages. */
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}
107
108#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
109#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
110#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
111#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
1da177e4 112#define unmap_hugepage_range(vma, start, end) BUG()
1da177e4
LT
113#define hugetlb_report_meminfo(buf) 0
114#define hugetlb_report_node_meminfo(n, buf) 0
115#define follow_huge_pmd(mm, addr, pmd, write) NULL
68589bc3 116#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
1da177e4
LT
117#define pmd_huge(x) 0
118#define is_hugepage_only_range(mm, addr, len) 0
9da61aef 119#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
ac9b9c66 120#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
1da177e4 121
8f860591
ZY
122#define hugetlb_change_protection(vma, address, end, newprot)
123
1da177e4 124#ifndef HPAGE_MASK
51c6f666
RH
125#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
126#define HPAGE_SIZE PAGE_SIZE
1da177e4
LT
127#endif
128
129#endif /* !CONFIG_HUGETLB_PAGE */
130
131#ifdef CONFIG_HUGETLBFS
132struct hugetlbfs_config {
133 uid_t uid;
134 gid_t gid;
135 umode_t mode;
136 long nr_blocks;
137 long nr_inodes;
138};
139
140struct hugetlbfs_sb_info {
141 long max_blocks; /* blocks allowed */
142 long free_blocks; /* blocks free */
143 long max_inodes; /* inodes allowed */
144 long free_inodes; /* inodes free */
145 spinlock_t stat_lock;
146};
147
148
149struct hugetlbfs_inode_info {
150 struct shared_policy policy;
151 struct inode vfs_inode;
152};
153
154static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
155{
156 return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
157}
158
159static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
160{
161 return sb->s_fs_info;
162}
163
4b6f5d20 164extern const struct file_operations hugetlbfs_file_operations;
1da177e4 165extern struct vm_operations_struct hugetlb_vm_ops;
9d66586f 166struct file *hugetlb_file_setup(const char *name, size_t);
1da177e4
LT
167int hugetlb_get_quota(struct address_space *mapping);
168void hugetlb_put_quota(struct address_space *mapping);
169
170static inline int is_file_hugepages(struct file *file)
171{
516dffdc
AL
172 if (file->f_op == &hugetlbfs_file_operations)
173 return 1;
174 if (is_file_shm_hugepages(file))
175 return 1;
176
177 return 0;
1da177e4
LT
178}
179
180static inline void set_file_hugepages(struct file *file)
181{
182 file->f_op = &hugetlbfs_file_operations;
183}
184#else /* !CONFIG_HUGETLBFS */
185
186#define is_file_hugepages(file) 0
187#define set_file_hugepages(file) BUG()
9d66586f 188#define hugetlb_file_setup(name,size) ERR_PTR(-ENOSYS)
1da177e4
LT
189
190#endif /* !CONFIG_HUGETLBFS */
191
d2ba27e8
AB
192#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
193unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
194 unsigned long len, unsigned long pgoff,
195 unsigned long flags);
196#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
197
1da177e4 198#endif /* _LINUX_HUGETLB_H */