Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
c94c2acf MW |
2 | #ifndef _LINUX_DAX_H |
3 | #define _LINUX_DAX_H | |
4 | ||
5 | #include <linux/fs.h> | |
6 | #include <linux/mm.h> | |
4f622938 | 7 | #include <linux/radix-tree.h> |
c94c2acf MW |
8 | #include <asm/pgtable.h> |
9 | ||
fefc1d97 PG |
10 | /* Flag for synchronous flush */ |
11 | #define DAXDEV_F_SYNC (1UL << 0) | |
12 | ||
27359fd6 MW |
13 | typedef unsigned long dax_entry_t; |
14 | ||
a254e568 | 15 | struct iomap_ops; |
6568b08b DW |
16 | struct dax_device; |
/*
 * Driver-provided operations for a dax_device; every member is invoked
 * through the dax core (see dax_direct_access() and friends below).
 */
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};
a254e568 | 38 | |
6e0c90d6 DW |
39 | extern struct attribute_group dax_attribute_group; |
40 | ||
f5705aa8 DW |
41 | #if IS_ENABLED(CONFIG_DAX) |
42 | struct dax_device *dax_get_by_host(const char *host); | |
976431b0 | 43 | struct dax_device *alloc_dax(void *private, const char *host, |
fefc1d97 | 44 | const struct dax_operations *ops, unsigned long flags); |
f5705aa8 | 45 | void put_dax(struct dax_device *dax_dev); |
976431b0 DW |
46 | void kill_dax(struct dax_device *dax_dev); |
47 | void dax_write_cache(struct dax_device *dax_dev, bool wc); | |
48 | bool dax_write_cache_enabled(struct dax_device *dax_dev); | |
fefc1d97 PG |
49 | bool __dax_synchronous(struct dax_device *dax_dev); |
50 | static inline bool dax_synchronous(struct dax_device *dax_dev) | |
51 | { | |
52 | return __dax_synchronous(dax_dev); | |
53 | } | |
54 | void __set_dax_synchronous(struct dax_device *dax_dev); | |
/* Mark @dax_dev synchronous; thin wrapper around __set_dax_synchronous(). */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
32de1484 PG |
59 | /* |
60 | * Check if given mapping is supported by the file / underlying device. | |
61 | */ | |
62 | static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, | |
63 | struct dax_device *dax_dev) | |
64 | { | |
65 | if (!(vma->vm_flags & VM_SYNC)) | |
66 | return true; | |
67 | if (!IS_DAX(file_inode(vma->vm_file))) | |
68 | return false; | |
69 | return dax_synchronous(dax_dev); | |
70 | } | |
f5705aa8 DW |
71 | #else |
/* CONFIG_DAX=n stub: no dax devices exist, so lookup always returns NULL. */
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
/* CONFIG_DAX=n stub: allocation is not possible. */
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
f5705aa8 DW |
/* CONFIG_DAX=n stub: nothing to release. */
static inline void put_dax(struct dax_device *dax_dev)
{
}
976431b0 DW |
/* CONFIG_DAX=n stub: no-op. */
static inline void kill_dax(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: write-cache setting is ignored. */
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
/* CONFIG_DAX=n stub: no write cache can be enabled. */
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
fefc1d97 PG |
/* CONFIG_DAX=n stub: always reports synchronous. */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
/* CONFIG_DAX=n stub: no-op. */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
32de1484 PG |
105 | static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, |
106 | struct dax_device *dax_dev) | |
107 | { | |
108 | return !(vma->vm_flags & VM_SYNC); | |
109 | } | |
f5705aa8 DW |
110 | #endif |
111 | ||
f44c7763 | 112 | struct writeback_control; |
ef510424 DW |
113 | int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); |
114 | #if IS_ENABLED(CONFIG_FS_DAX) | |
80660f20 DJ |
115 | bool __bdev_dax_supported(struct block_device *bdev, int blocksize); |
/* Check whether @bdev can host filesystem DAX at @blocksize; wraps __bdev_dax_supported(). */
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}
f5705aa8 | 120 | |
7bf7eac8 DW |
121 | bool __generic_fsdax_supported(struct dax_device *dax_dev, |
122 | struct block_device *bdev, int blocksize, sector_t start, | |
123 | sector_t sectors); | |
/*
 * Validate @dax_dev as an fsdax backing device for the @sectors-long
 * range of @bdev starting at @start; wraps __generic_fsdax_supported().
 */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}
131 | ||
f5705aa8 DW |
/* fs-dax flavored lookup of a dax_device by host name; wraps dax_get_by_host(). */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}
136 | ||
/* Drop a reference obtained via fs_dax_get_by_host(); wraps put_dax(). */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}
141 | ||
78f35473 | 142 | struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); |
f44c7763 DW |
143 | int dax_writeback_mapping_range(struct address_space *mapping, |
144 | struct block_device *bdev, struct writeback_control *wbc); | |
5fac7408 DW |
145 | |
146 | struct page *dax_layout_busy_page(struct address_space *mapping); | |
27359fd6 MW |
147 | dax_entry_t dax_lock_page(struct page *page); |
148 | void dax_unlock_page(struct page *page, dax_entry_t cookie); | |
ef510424 | 149 | #else |
/* CONFIG_FS_DAX=n stub: filesystem DAX is never supported. */
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}
ef510424 | 155 | |
7bf7eac8 DW |
/* CONFIG_FS_DAX=n stub: no device can back fsdax. */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}
162 | ||
/* CONFIG_FS_DAX=n stub: lookup always fails. */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}
167 | ||
/* CONFIG_FS_DAX=n stub: nothing to release. */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
78f35473 DW |
171 | |
/* CONFIG_FS_DAX=n stub: no dax_device is associated with any bdev. */
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}
f44c7763 | 176 | |
5fac7408 DW |
/* CONFIG_FS_DAX=n stub: there are no DAX pages, hence no busy page. */
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}
181 | ||
f44c7763 DW |
/* CONFIG_FS_DAX=n stub: DAX writeback is not available. */
static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}
c2a7d2a1 | 187 | |
27359fd6 | 188 | static inline dax_entry_t dax_lock_page(struct page *page) |
c2a7d2a1 DW |
189 | { |
190 | if (IS_DAX(page->mapping->host)) | |
27359fd6 MW |
191 | return ~0UL; |
192 | return 0; | |
c2a7d2a1 DW |
193 | } |
194 | ||
/* CONFIG_FS_DAX=n stub: nothing was locked, nothing to unlock. */
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
ef510424 DW |
198 | #endif |
199 | ||
7b6be844 DW |
200 | int dax_read_lock(void); |
201 | void dax_read_unlock(int id); | |
c1d6e828 | 202 | bool dax_alive(struct dax_device *dax_dev); |
c1d6e828 | 203 | void *dax_get_private(struct dax_device *dax_dev); |
b0686260 DW |
204 | long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, |
205 | void **kaddr, pfn_t *pfn); | |
7bf7eac8 DW |
206 | bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, |
207 | int blocksize, sector_t start, sector_t len); | |
7e026c8c DW |
208 | size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, |
209 | size_t bytes, struct iov_iter *i); | |
b3a9a0c3 DW |
210 | size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, |
211 | size_t bytes, struct iov_iter *i); | |
c3ca015f | 212 | void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); |
7b6be844 | 213 | |
11c59c92 | 214 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, |
8ff6daa1 | 215 | const struct iomap_ops *ops); |
f77bc3a8 | 216 | vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, |
c0b24625 | 217 | pfn_t *pfnp, int *errp, const struct iomap_ops *ops); |
ab77dab4 SJ |
218 | vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, |
219 | enum page_entry_size pe_size, pfn_t pfn); | |
ac401cc7 | 220 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); |
c6dcf52c JK |
221 | int dax_invalidate_mapping_entry_sync(struct address_space *mapping, |
222 | pgoff_t index); | |
d1a5f2b4 DW |
223 | |
224 | #ifdef CONFIG_FS_DAX | |
cccbce67 DW |
225 | int __dax_zero_page_range(struct block_device *bdev, |
226 | struct dax_device *dax_dev, sector_t sector, | |
679c8bd3 | 227 | unsigned int offset, unsigned int length); |
d1a5f2b4 | 228 | #else |
/* CONFIG_FS_DAX=n stub: zeroing through DAX is impossible. */
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
d1a5f2b4 DW |
235 | #endif |
236 | ||
f9fe48be RZ |
237 | static inline bool dax_mapping(struct address_space *mapping) |
238 | { | |
239 | return mapping->host && IS_DAX(mapping->host); | |
240 | } | |
7f6d5b52 | 241 | |
c94c2acf | 242 | #endif |