/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Network filesystem support services.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See:
 *
 *	Documentation/filesystems/netfs_library.rst
 *
 * for a description of the network filesystem interface declared here.
 */

#ifndef _LINUX_NETFS_H
#define _LINUX_NETFS_H

#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

enum netfs_sreq_ref_trace;

/*
 * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
 * a page is currently backed by a local disk cache.
 */
#define folio_test_fscache(folio)	folio_test_private_2(folio)
#define PageFsCache(page)		PagePrivate2((page))
#define SetPageFsCache(page)		SetPagePrivate2((page))
#define ClearPageFsCache(page)		ClearPagePrivate2((page))
#define TestSetPageFsCache(page)	TestSetPagePrivate2((page))
#define TestClearPageFsCache(page)	TestClearPagePrivate2((page))

/**
 * folio_start_fscache - Start an fscache write on a folio.
 * @folio: The folio.
 *
 * Call this function before writing a folio to a local cache.  Starting a
 * second write before the first one finishes is not allowed.
 */
static inline void folio_start_fscache(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
	folio_get(folio);
	folio_set_private_2(folio);
}

/**
 * folio_end_fscache - End an fscache write on a folio.
 * @folio: The folio.
 *
 * Call this function after the folio has been written to the local cache.
 * This will wake any sleepers waiting on this folio.
 */
static inline void folio_end_fscache(struct folio *folio)
{
	folio_end_private_2(folio);
}

/**
 * folio_wait_fscache - Wait for an fscache write on this folio to end.
 * @folio: The folio.
 *
 * If this folio is currently being written to a local cache, wait for
 * the write to finish.  Another write may start after this one finishes,
 * unless the caller holds the folio lock.
 */
static inline void folio_wait_fscache(struct folio *folio)
{
	folio_wait_private_2(folio);
}

/**
 * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
 * @folio: The folio.
 *
 * If this folio is currently being written to a local cache, wait
 * for the write to finish or for a fatal signal to be received.
 * Another write may start after this one finishes, unless the caller
 * holds the folio lock.
 *
 * Return:
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.
 */
static inline int folio_wait_fscache_killable(struct folio *folio)
{
	return folio_wait_private_2_killable(folio);
}

static inline void set_page_fscache(struct page *page)
{
	folio_start_fscache(page_folio(page));
}

static inline void end_page_fscache(struct page *page)
{
	folio_end_private_2(page_folio(page));
}

static inline void wait_on_page_fscache(struct page *page)
{
	folio_wait_private_2(page_folio(page));
}

static inline int wait_on_page_fscache_killable(struct page *page)
{
	return folio_wait_private_2_killable(page_folio(page));
}

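/* Illustrative sketch (not part of this header): the expected usage pattern
 * is to mark a folio before copying it to the cache and to clear the mark
 * from the write completion handler.  The "myfs" names below are
 * hypothetical; the cache write itself would typically go via the
 * netfs_cache_ops ->write() method or fscache_write_to_cache():
 *
 *	folio_start_fscache(folio);		// takes a folio ref
 *	myfs_issue_cache_write(folio, myfs_write_done);
 *
 *	static void myfs_write_done(struct folio *folio)
 *	{
 *		folio_end_fscache(folio);	// wakes waiters, drops the ref
 *	}
 *
 * Anyone who needs exclusive use of the folio (e.g. before truncating or
 * re-dirtying it) calls folio_wait_fscache() or its killable variant first.
 */
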
enum netfs_io_source {
	NETFS_FILL_WITH_ZEROES,
	NETFS_DOWNLOAD_FROM_SERVER,
	NETFS_READ_FROM_CACHE,
	NETFS_INVALID_READ,
} __mode(byte);

typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
				      bool was_async);

/*
 * Per-inode context.  This wraps the VFS inode.
 */
struct netfs_inode {
	struct inode		inode;		/* The VFS inode */
	const struct netfs_request_ops *ops;
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie	*cache;
#endif
	loff_t			remote_i_size;	/* Size of the remote file */
	unsigned long		flags;
#define NETFS_ICTX_ODIRECT	0		/* The file has DIO in progress */
};

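/* Illustrative sketch (hypothetical "myfs" names): a network filesystem
 * embeds struct netfs_inode at the start of its own inode so that the VFS
 * inode, the netfs context and the fs-private state form one allocation:
 *
 *	struct myfs_inode {
 *		struct netfs_inode	netfs;	// must be first
 *		unsigned long		myfs_flags;
 *	};
 *
 *	static inline struct myfs_inode *MYFS_I(struct inode *inode)
 *	{
 *		return container_of(inode, struct myfs_inode, netfs.inode);
 *	}
 */
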
/*
 * Resources required to do operations on a cache.
 */
struct netfs_cache_resources {
	const struct netfs_cache_ops	*ops;
	void				*cache_priv;
	void				*cache_priv2;
	unsigned int			debug_id;	/* Cookie debug ID */
	unsigned int			inval_counter;	/* object->inval_counter at begin_op */
};

/*
 * Descriptor for a single component subrequest.
 */
struct netfs_io_subrequest {
	struct netfs_io_request *rreq;		/* Supervising I/O request */
	struct list_head	rreq_link;	/* Link in rreq->subrequests */
	struct iov_iter		io_iter;	/* Iterator for this subrequest */
	loff_t			start;		/* Where to start the I/O */
	size_t			len;		/* Size of the I/O */
	size_t			transferred;	/* Amount of data transferred */
	refcount_t		ref;
	short			error;		/* 0 or error that occurred */
	unsigned short		debug_index;	/* Index in list (for debugging output) */
	enum netfs_io_source	source;		/* Where to read from/write to */
	unsigned long		flags;
#define NETFS_SREQ_COPY_TO_CACHE	0	/* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL		1	/* Set if the rest of the read should be cleared */
#define NETFS_SREQ_SHORT_IO		2	/* Set if the I/O was short */
#define NETFS_SREQ_SEEK_DATA_READ	3	/* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS		4	/* Set if we didn't manage to read any data */
#define NETFS_SREQ_ONDEMAND		5	/* Set if it's from on-demand read mode */
};

enum netfs_io_origin {
	NETFS_READAHEAD,		/* This read was triggered by readahead */
	NETFS_READPAGE,			/* This read is a synchronous read */
	NETFS_READ_FOR_WRITE,		/* This read is to prepare a write */
} __mode(byte);

/*
 * Descriptor for an I/O helper request.  This is used to make multiple I/O
 * operations to a variety of data stores and then stitch the result together.
 */
struct netfs_io_request {
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct inode		*inode;		/* The file being accessed */
	struct address_space	*mapping;	/* The mapping being accessed */
	struct netfs_cache_resources cache_resources;
	struct list_head	proc_link;	/* Link in netfs_iorequests */
	struct list_head	subrequests;	/* Contributory I/O operations */
	struct iov_iter		iter;		/* Unencrypted-side iterator */
	struct iov_iter		io_iter;	/* I/O (Encrypted-side) iterator */
	void			*netfs_priv;	/* Private data for the netfs */
	struct bio_vec		*direct_bv;	/* DIO buffer list (when handling iovec-iter) */
	unsigned int		direct_bv_count; /* Number of elements in direct_bv[] */
	unsigned int		debug_id;
	atomic_t		nr_outstanding;	/* Number of ops in progress */
	atomic_t		nr_copy_ops;	/* Number of copy-to-cache ops in progress */
	size_t			submitted;	/* Amount submitted for I/O so far */
	size_t			len;		/* Length of the request */
	short			error;		/* 0 or error that occurred */
	enum netfs_io_origin	origin;		/* Origin of the request */
	bool			direct_bv_unpin; /* T if direct_bv[] must be unpinned */
	loff_t			i_size;		/* Size of the file */
	loff_t			start;		/* Start position */
	pgoff_t			no_unlock_folio; /* Don't unlock this folio after read */
	refcount_t		ref;
	unsigned long		flags;
#define NETFS_RREQ_INCOMPLETE_IO	0	/* Some ioreqs terminated short or with error */
#define NETFS_RREQ_COPY_TO_CACHE	1	/* Need to write to the cache */
#define NETFS_RREQ_NO_UNLOCK_FOLIO	2	/* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED		4	/* The request failed */
#define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes */
	const struct netfs_request_ops *netfs_ops;
};

/*
 * Operations the network filesystem can/must provide to the helpers.
 */
struct netfs_request_ops {
	unsigned int	io_request_size;	/* Alloc size for netfs_io_request struct */
	unsigned int	io_subrequest_size;	/* Alloc size for netfs_io_subrequest struct */
	int (*init_request)(struct netfs_io_request *rreq, struct file *file);
	void (*free_request)(struct netfs_io_request *rreq);
	void (*free_subrequest)(struct netfs_io_subrequest *rreq);

	void (*expand_readahead)(struct netfs_io_request *rreq);
	bool (*clamp_length)(struct netfs_io_subrequest *subreq);
	void (*issue_read)(struct netfs_io_subrequest *subreq);
	bool (*is_still_valid)(struct netfs_io_request *rreq);
	int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
				 struct folio **foliop, void **_fsdata);
	void (*done)(struct netfs_io_request *rreq);
};

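/* Illustrative sketch (hypothetical "myfs" names): a minimal ops table only
 * has to issue reads.  The helpers split a request into subrequests, call
 * ->issue_read() for each one, and the filesystem reports completion with
 * netfs_subreq_terminated(), declared below:
 *
 *	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		// Fetch subreq->len bytes at subreq->start from the server
 *		// into subreq->io_iter, then, from the completion path:
 *		netfs_subreq_terminated(subreq, bytes_copied_or_error, false);
 *	}
 *
 *	const struct netfs_request_ops myfs_req_ops = {
 *		.issue_read	= myfs_issue_read,
 *	};
 */
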
/*
 * How to handle reading from a hole.
 */
enum netfs_read_from_hole {
	NETFS_READ_HOLE_IGNORE,
	NETFS_READ_HOLE_CLEAR,
	NETFS_READ_HOLE_FAIL,
};

/*
 * Table of operations for access to a cache.
 */
struct netfs_cache_ops {
	/* End an operation */
	void (*end_operation)(struct netfs_cache_resources *cres);

	/* Read data from the cache */
	int (*read)(struct netfs_cache_resources *cres,
		    loff_t start_pos,
		    struct iov_iter *iter,
		    enum netfs_read_from_hole read_hole,
		    netfs_io_terminated_t term_func,
		    void *term_func_priv);

	/* Write data to the cache */
	int (*write)(struct netfs_cache_resources *cres,
		     loff_t start_pos,
		     struct iov_iter *iter,
		     netfs_io_terminated_t term_func,
		     void *term_func_priv);

	/* Expand readahead request */
	void (*expand_readahead)(struct netfs_cache_resources *cres,
				 loff_t *_start, size_t *_len, loff_t i_size);

	/* Prepare a read operation, shortening it to a cached/uncached
	 * boundary as appropriate.
	 */
	enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
					     loff_t i_size);

	/* Prepare a write operation, working out what part of the write we can
	 * actually do.
	 */
	int (*prepare_write)(struct netfs_cache_resources *cres,
			     loff_t *_start, size_t *_len, loff_t i_size,
			     bool no_space_allocated_yet);

	/* Prepare an on-demand read operation, shortening it to a cached/uncached
	 * boundary as appropriate.
	 */
	enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
						      loff_t start, size_t *_len,
						      loff_t i_size,
						      unsigned long *_flags, ino_t ino);

	/* Query the occupancy of the cache in a region, returning where the
	 * next chunk of data starts and how long it is.
	 */
	int (*query_occupancy)(struct netfs_cache_resources *cres,
			       loff_t start, size_t len, size_t granularity,
			       loff_t *_data_start, size_t *_data_len);
};

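/* Illustrative sketch: once cache resources have been set up by the cache
 * backend (e.g. via fscache_begin_read_operation()), a filesystem might ask
 * where cached data lies before deciding whether to read from the cache or
 * from the server.  All locals here are hypothetical:
 *
 *	loff_t data_start;
 *	size_t data_len;
 *
 *	if (cres->ops &&
 *	    cres->ops->query_occupancy(cres, start, len, PAGE_SIZE,
 *				       &data_start, &data_len) == 0)
 *		// [data_start, data_start + data_len) is present in the cache
 */
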
struct readahead_control;
void netfs_readahead(struct readahead_control *);
int netfs_read_folio(struct file *, struct folio *);
int netfs_write_begin(struct netfs_inode *, struct file *,
		      struct address_space *, loff_t pos, unsigned int len,
		      struct folio **, void **fsdata);
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool netfs_release_folio(struct folio *folio, gfp_t gfp);

void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
			  bool was_async, enum netfs_sreq_ref_trace what);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
				struct iov_iter *new,
				iov_iter_extraction_t extraction_flags);

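/* Illustrative sketch (hypothetical "myfs"): most of the functions declared
 * above slot directly into the filesystem's address_space_operations, as
 * their signatures match the corresponding methods:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.read_folio		= netfs_read_folio,
 *		.readahead		= netfs_readahead,
 *		.dirty_folio		= netfs_dirty_folio,
 *		.release_folio		= netfs_release_folio,
 *		.invalidate_folio	= netfs_invalidate_folio,
 *	};
 */
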
int netfs_start_io_read(struct inode *inode);
void netfs_end_io_read(struct inode *inode);
int netfs_start_io_write(struct inode *inode);
void netfs_end_io_write(struct inode *inode);
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);

/**
 * netfs_inode - Get the netfs inode context from the inode
 * @inode: The inode to query
 *
 * Get the netfs lib inode context from the network filesystem's inode.  The
 * context struct is expected to directly follow on from the VFS inode struct.
 */
static inline struct netfs_inode *netfs_inode(struct inode *inode)
{
	return container_of(inode, struct netfs_inode, inode);
}

/**
 * netfs_inode_init - Initialise a netfslib inode context
 * @ctx: The netfs inode to initialise
 * @ops: The netfs's operations list
 *
 * Initialise the netfs library context struct.  This is expected to follow on
 * directly from the VFS inode struct.
 */
static inline void netfs_inode_init(struct netfs_inode *ctx,
				    const struct netfs_request_ops *ops)
{
	ctx->ops = ops;
	ctx->remote_i_size = i_size_read(&ctx->inode);
	ctx->flags = 0;
#if IS_ENABLED(CONFIG_FSCACHE)
	ctx->cache = NULL;
#endif
}

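/* Illustrative sketch: with the hypothetical myfs_inode wrapper shown
 * earlier, the filesystem would call this while setting up an inode, e.g.:
 *
 *	netfs_inode_init(&MYFS_I(inode)->netfs, &myfs_req_ops);
 */
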
/**
 * netfs_resize_file - Note that a file got resized
 * @ctx: The netfs inode being resized
 * @new_i_size: The new file size
 *
 * Inform the netfs lib that a file got resized so that it can adjust its state.
 */
static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
{
	ctx->remote_i_size = new_i_size;
}

/**
 * netfs_i_cookie - Get the cache cookie from the inode
 * @ctx: The netfs inode to query
 *
 * Get the caching cookie (if enabled) from the network filesystem's inode.
 */
static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	return ctx->cache;
#else
	return NULL;
#endif
}

#endif /* _LINUX_NETFS_H */