Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
c94c2acf MW |
2 | #ifndef _LINUX_DAX_H |
3 | #define _LINUX_DAX_H | |
4 | ||
5 | #include <linux/fs.h> | |
6 | #include <linux/mm.h> | |
4f622938 | 7 | #include <linux/radix-tree.h> |
c94c2acf | 8 | |
fefc1d97 PG |
9 | /* Flag for synchronous flush */ |
10 | #define DAXDEV_F_SYNC (1UL << 0) | |
11 | ||
27359fd6 MW |
12 | typedef unsigned long dax_entry_t; |
13 | ||
struct iomap_ops;
struct iomap;
struct dax_device;
/*
 * Operations a DAX-capable device driver supplies to the dax core.
 * All callbacks take the dax_device plus a device-relative page offset.
 */
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* zero_page_range: required operation. Zero page range */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
a254e568 | 40 | |
6e0c90d6 DW |
41 | extern struct attribute_group dax_attribute_group; |
42 | ||
f5705aa8 DW |
#if IS_ENABLED(CONFIG_DAX)
/* Real dax_device lifetime management when the dax core is built in. */
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
/*
 * Does this device flush writes synchronously (see DAXDEV_F_SYNC)?
 * Thin wrapper so callers need not reference __dax_synchronous() directly.
 */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
/* Mark the device as synchronous (see DAXDEV_F_SYNC). */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
32de1484 PG |
61 | /* |
62 | * Check if given mapping is supported by the file / underlying device. | |
63 | */ | |
64 | static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, | |
65 | struct dax_device *dax_dev) | |
66 | { | |
67 | if (!(vma->vm_flags & VM_SYNC)) | |
68 | return true; | |
69 | if (!IS_DAX(file_inode(vma->vm_file))) | |
70 | return false; | |
71 | return dax_synchronous(dax_dev); | |
72 | } | |
f5705aa8 DW |
73 | #else |
static inline struct dax_device *dax_get_by_host(const char *host)
{
	/* DAX compiled out: there are no dax devices to look up. */
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
/* No-op stubs when CONFIG_DAX is disabled: no dax devices exist. */
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
/* Without a dax core, report synchronous and make setting it a no-op. */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
32de1484 PG |
107 | static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, |
108 | struct dax_device *dax_dev) | |
109 | { | |
110 | return !(vma->vm_flags & VM_SYNC); | |
111 | } | |
f5705aa8 DW |
112 | #endif |
113 | ||
struct writeback_control;
/* NOTE(review): presumably maps a bdev sector to a dax pgoff — confirm at definition. */
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
116 | #if IS_ENABLED(CONFIG_FS_DAX) | |
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
/* Wrapper: defers entirely to __bdev_dax_supported(). */
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}
f5705aa8 | 122 | |
bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
/*
 * Wrapper around __generic_fsdax_supported(); signature matches the
 * ->dax_supported() callback in struct dax_operations.
 */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}
133 | ||
/* Drop a dax_device reference; presumably pairs with fs_dax_get_by_bdev(). */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}
138 | ||
/* fs-dax entry points available when CONFIG_FS_DAX is enabled. */
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
ef510424 | 146 | #else |
/* CONFIG_FS_DAX disabled: no block device supports fs-dax. */
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}
ef510424 | 152 | |
/* CONFIG_FS_DAX disabled: the generic check always fails. */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}
159 | ||
/* CONFIG_FS_DAX disabled: nothing to put, nothing to look up. */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}
f44c7763 | 168 | |
/* CONFIG_FS_DAX disabled: no dax pages can be busy. */
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

/* Nothing to write back without fs-dax; report the operation unsupported. */
static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}
c2a7d2a1 | 179 | |
27359fd6 | 180 | static inline dax_entry_t dax_lock_page(struct page *page) |
c2a7d2a1 DW |
181 | { |
182 | if (IS_DAX(page->mapping->host)) | |
27359fd6 MW |
183 | return ~0UL; |
184 | return 0; | |
c2a7d2a1 DW |
185 | } |
186 | ||
/* CONFIG_FS_DAX disabled: dax_lock_page() took no lock, so nothing to undo. */
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
ef510424 DW |
190 | #endif |
191 | ||
/* Core dax_device API. */
int dax_read_lock(void);
void dax_read_unlock(int id);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

/* fs-dax I/O, fault and mapping-entry entry points for filesystems. */
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
		struct iomap *iomap);
f9fe48be RZ |
219 | static inline bool dax_mapping(struct address_space *mapping) |
220 | { | |
221 | return mapping->host && IS_DAX(mapping->host); | |
222 | } | |
7f6d5b52 | 223 | |
c94c2acf | 224 | #endif |