/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Network filesystem support services.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See:
 *
 *	Documentation/filesystems/netfs_library.rst
 *
 * for a description of the network filesystem interface declared here.
 */

#ifndef _LINUX_NETFS_H
#define _LINUX_NETFS_H

#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

enum netfs_sreq_ref_trace;

/*
 * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
 * a page is currently backed by a local disk cache
 */
#define folio_test_fscache(folio)	folio_test_private_2(folio)
#define PageFsCache(page)		PagePrivate2((page))
#define SetPageFsCache(page)		SetPagePrivate2((page))
#define ClearPageFsCache(page)		ClearPagePrivate2((page))
#define TestSetPageFsCache(page)	TestSetPagePrivate2((page))
#define TestClearPageFsCache(page)	TestClearPagePrivate2((page))

/**
 * folio_start_fscache - Start an fscache write on a folio.
 * @folio: The folio.
 *
 * Call this function before writing a folio to a local cache.  Starting a
 * second write before the first one finishes is not allowed.
 */
static inline void folio_start_fscache(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
	folio_get(folio);
	folio_set_private_2(folio);
}

/**
 * folio_end_fscache - End an fscache write on a folio.
 * @folio: The folio.
 *
 * Call this function after the folio has been written to the local cache.
 * This will wake any sleepers waiting on this folio.
 */
static inline void folio_end_fscache(struct folio *folio)
{
	folio_end_private_2(folio);
}

/**
 * folio_wait_fscache - Wait for an fscache write on this folio to end.
 * @folio: The folio.
 *
 * If this folio is currently being written to a local cache, wait for
 * the write to finish.  Another write may start after this one finishes,
 * unless the caller holds the folio lock.
 */
static inline void folio_wait_fscache(struct folio *folio)
{
	folio_wait_private_2(folio);
}

/**
 * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
 * @folio: The folio.
 *
 * If this folio is currently being written to a local cache, wait
 * for the write to finish or for a fatal signal to be received.
 * Another write may start after this one finishes, unless the caller
 * holds the folio lock.
 *
 * Return:
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.
 */
static inline int folio_wait_fscache_killable(struct folio *folio)
{
	return folio_wait_private_2_killable(folio);
}

static inline void set_page_fscache(struct page *page)
{
	folio_start_fscache(page_folio(page));
}

static inline void end_page_fscache(struct page *page)
{
	folio_end_private_2(page_folio(page));
}

static inline void wait_on_page_fscache(struct page *page)
{
	folio_wait_private_2(page_folio(page));
}

static inline int wait_on_page_fscache_killable(struct page *page)
{
	return folio_wait_private_2_killable(page_folio(page));
}

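/*
 * Example (illustrative only, not part of the original header): a typical
 * write-to-cache cycle using the helpers above.  "myfs" and
 * myfs_copy_folio_to_cache() are hypothetical names.
 *
 *	static int myfs_cache_folio(struct folio *folio)
 *	{
 *		int ret;
 *
 *		// Bail out if a fatal signal arrives while an earlier
 *		// cache write is still in flight.
 *		ret = folio_wait_fscache_killable(folio);
 *		if (ret < 0)
 *			return ret;
 *
 *		folio_start_fscache(folio);	// Mark the write as in progress
 *		ret = myfs_copy_folio_to_cache(folio);
 *		folio_end_fscache(folio);	// Wake anyone waiting on the folio
 *		return ret;
 *	}
 */
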
/* Marks used on xarray-based buffers */
#define NETFS_BUF_PUT_MARK	XA_MARK_0	/* - Page needs putting */
#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1	/* - Page needs wb/dirty flag wrangling */

enum netfs_io_source {
	NETFS_FILL_WITH_ZEROES,
	NETFS_DOWNLOAD_FROM_SERVER,
	NETFS_READ_FROM_CACHE,
	NETFS_INVALID_READ,
	NETFS_UPLOAD_TO_SERVER,
	NETFS_WRITE_TO_CACHE,
	NETFS_INVALID_WRITE,
} __mode(byte);

typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
				      bool was_async);

/*
 * Per-inode context.  This wraps the VFS inode.
 */
struct netfs_inode {
	struct inode		inode;		/* The VFS inode */
	const struct netfs_request_ops *ops;
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie	*cache;
#endif
	loff_t			remote_i_size;	/* Size of the remote file */
	loff_t			zero_point;	/* Size after which we assume there's no data
						 * on the server */
	unsigned long		flags;
#define NETFS_ICTX_ODIRECT	0		/* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED	1		/* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH	2		/* Write-through caching */
#define NETFS_ICTX_NO_WRITE_STREAMING	3	/* Don't engage in write-streaming */
};

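/*
 * Example (illustrative only): a network filesystem is expected to embed
 * struct netfs_inode at the start of its own inode structure so that the
 * VFS inode, the netfs context and the filesystem's private state share one
 * allocation.  "myfs_inode" and MYFS_I() are hypothetical names.
 *
 *	struct myfs_inode {
 *		struct netfs_inode netfs;	// Must be the first member
 *		unsigned long myfs_flags;
 *	};
 *
 *	static inline struct myfs_inode *MYFS_I(struct inode *inode)
 *	{
 *		return container_of(inode, struct myfs_inode, netfs.inode);
 *	}
 */
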
/*
 * A netfs group - for instance a ceph snap.  This is marked on dirty pages and
 * pages marked with a group must be flushed before they can be written under
 * the domain of another group.
 */
struct netfs_group {
	refcount_t		ref;
	void (*free)(struct netfs_group *netfs_group);
};

/*
 * Information about a dirty page (attached only if necessary).
 * folio->private
 */
struct netfs_folio {
	struct netfs_group	*netfs_group;	/* Filesystem's grouping marker (or NULL). */
	unsigned int		dirty_offset;	/* Write-streaming dirty data offset */
	unsigned int		dirty_len;	/* Write-streaming dirty data length */
};
#define NETFS_FOLIO_INFO	0x1UL	/* OR'd with folio->private. */

static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
{
	void *priv = folio_get_private(folio);

	if ((unsigned long)priv & NETFS_FOLIO_INFO)
		return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
	return NULL;
}

static inline struct netfs_group *netfs_folio_group(struct folio *folio)
{
	struct netfs_folio *finfo;
	void *priv = folio_get_private(folio);

	finfo = netfs_folio_info(folio);
	if (finfo)
		return finfo->netfs_group;
	return priv;
}

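/*
 * Example (illustrative only): folio->private either points directly at the
 * netfs_group the folio was dirtied under, or, with NETFS_FOLIO_INFO set in
 * bit 0, at a struct netfs_folio describing a partially-dirty
 * (write-streamed) folio.  The helper below is hypothetical.
 *
 *	static bool myfs_folio_fully_dirty(struct folio *folio)
 *	{
 *		const struct netfs_folio *finfo = netfs_folio_info(folio);
 *
 *		// No netfs_folio attached: the whole folio is dirty.
 *		if (!finfo)
 *			return true;
 *		return finfo->dirty_offset == 0 &&
 *		       finfo->dirty_len == folio_size(folio);
 *	}
 */
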
/*
 * Resources required to do operations on a cache.
 */
struct netfs_cache_resources {
	const struct netfs_cache_ops	*ops;
	void				*cache_priv;
	void				*cache_priv2;
	unsigned int			debug_id;	/* Cookie debug ID */
	unsigned int			inval_counter;	/* object->inval_counter at begin_op */
};

/*
 * Descriptor for a single component subrequest.  Each operation represents an
 * individual read/write from/to a server, a cache, a journal, etc..
 *
 * The buffer iterator is persistent for the life of the subrequest struct and
 * the pages it points to can be relied on to exist for the duration.
 */
struct netfs_io_subrequest {
	struct netfs_io_request *rreq;		/* Supervising I/O request */
	struct work_struct	work;
	struct list_head	rreq_link;	/* Link in rreq->subrequests */
	struct iov_iter		io_iter;	/* Iterator for this subrequest */
	loff_t			start;		/* Where to start the I/O */
	size_t			len;		/* Size of the I/O */
	size_t			transferred;	/* Amount of data transferred */
	refcount_t		ref;
	short			error;		/* 0 or error that occurred */
	unsigned short		debug_index;	/* Index in list (for debugging output) */
	unsigned int		max_nr_segs;	/* 0 or max number of segments in an iterator */
	enum netfs_io_source	source;		/* Where to read from/write to */
	unsigned long		flags;
#define NETFS_SREQ_COPY_TO_CACHE	0	/* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL		1	/* Set if the rest of the read should be cleared */
#define NETFS_SREQ_SHORT_IO		2	/* Set if the I/O was short */
#define NETFS_SREQ_SEEK_DATA_READ	3	/* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS		4	/* Set if we didn't manage to read any data */
#define NETFS_SREQ_ONDEMAND		5	/* Set if it's from on-demand read mode */
};

enum netfs_io_origin {
	NETFS_READAHEAD,		/* This read was triggered by readahead */
	NETFS_READPAGE,			/* This read is a synchronous read */
	NETFS_READ_FOR_WRITE,		/* This read is to prepare a write */
	NETFS_WRITEBACK,		/* This write was triggered by writepages */
	NETFS_WRITETHROUGH,		/* This write was made by netfs_perform_write() */
	NETFS_LAUNDER_WRITE,		/* This is triggered by ->launder_folio() */
	NETFS_UNBUFFERED_WRITE,		/* This is an unbuffered write */
	NETFS_DIO_READ,			/* This is a direct I/O read */
	NETFS_DIO_WRITE,		/* This is a direct I/O write */
	nr__netfs_io_origin
} __mode(byte);

/*
 * Descriptor for an I/O helper request.  This is used to make multiple I/O
 * operations to a variety of data stores and then stitch the result together.
 */
struct netfs_io_request {
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct inode		*inode;		/* The file being accessed */
	struct address_space	*mapping;	/* The mapping being accessed */
	struct kiocb		*iocb;		/* AIO completion vector */
	struct netfs_cache_resources cache_resources;
	struct list_head	proc_link;	/* Link in netfs_iorequests */
	struct list_head	subrequests;	/* Contributory I/O operations */
	struct iov_iter		iter;		/* Unencrypted-side iterator */
	struct iov_iter		io_iter;	/* I/O (Encrypted-side) iterator */
	void			*netfs_priv;	/* Private data for the netfs */
	struct bio_vec		*direct_bv;	/* DIO buffer list (when handling iovec-iter) */
	unsigned int		direct_bv_count; /* Number of elements in direct_bv[] */
	unsigned int		debug_id;
	unsigned int		rsize;		/* Maximum read size (0 for none) */
	unsigned int		wsize;		/* Maximum write size (0 for none) */
	unsigned int		subreq_counter;	/* Next subreq->debug_index */
	atomic_t		nr_outstanding;	/* Number of ops in progress */
	atomic_t		nr_copy_ops;	/* Number of copy-to-cache ops in progress */
	size_t			submitted;	/* Amount submitted for I/O so far */
	size_t			len;		/* Length of the request */
	size_t			upper_len;	/* Length can be extended to here */
	size_t			transferred;	/* Amount to be indicated as transferred */
	short			error;		/* 0 or error that occurred */
	enum netfs_io_origin	origin;		/* Origin of the request */
	bool			direct_bv_unpin; /* T if direct_bv[] must be unpinned */
	loff_t			i_size;		/* Size of the file */
	loff_t			start;		/* Start position */
	pgoff_t			no_unlock_folio; /* Don't unlock this folio after read */
	refcount_t		ref;
	unsigned long		flags;
#define NETFS_RREQ_INCOMPLETE_IO	0	/* Some ioreqs terminated short or with error */
#define NETFS_RREQ_COPY_TO_CACHE	1	/* Need to write to the cache */
#define NETFS_RREQ_NO_UNLOCK_FOLIO	2	/* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED		4	/* The request failed */
#define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes */
#define NETFS_RREQ_WRITE_TO_CACHE	7	/* Need to write to the cache */
#define NETFS_RREQ_UPLOAD_TO_SERVER	8	/* Need to write to the server */
#define NETFS_RREQ_NONBLOCK		9	/* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED		10	/* We blocked */
	const struct netfs_request_ops *netfs_ops;
	void (*cleanup)(struct netfs_io_request *req);
};

/*
 * Operations the network filesystem can/must provide to the helpers.
 */
struct netfs_request_ops {
	unsigned int	io_request_size;	/* Alloc size for netfs_io_request struct */
	unsigned int	io_subrequest_size;	/* Alloc size for netfs_io_subrequest struct */
	int (*init_request)(struct netfs_io_request *rreq, struct file *file);
	void (*free_request)(struct netfs_io_request *rreq);
	void (*free_subrequest)(struct netfs_io_subrequest *rreq);

	/* Read request handling */
	void (*expand_readahead)(struct netfs_io_request *rreq);
	bool (*clamp_length)(struct netfs_io_subrequest *subreq);
	void (*issue_read)(struct netfs_io_subrequest *subreq);
	bool (*is_still_valid)(struct netfs_io_request *rreq);
	int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
				 struct folio **foliop, void **_fsdata);
	void (*done)(struct netfs_io_request *rreq);

	/* Modification handling */
	void (*update_i_size)(struct inode *inode, loff_t i_size);

	/* Write request handling */
	void (*create_write_requests)(struct netfs_io_request *wreq,
				      loff_t start, size_t len);
	void (*invalidate_cache)(struct netfs_io_request *wreq);
};

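/*
 * Example (illustrative only): a minimal set of request ops for a
 * hypothetical "myfs".  The ->issue_read() handler fetches subreq->len bytes
 * at subreq->start into subreq->io_iter and reports the result with
 * netfs_subreq_terminated(); myfs_fetch_from_server() is hypothetical.
 *
 *	static int myfs_init_request(struct netfs_io_request *rreq, struct file *file)
 *	{
 *		rreq->netfs_priv = file;	// Stash whatever the netfs needs
 *		return 0;
 *	}
 *
 *	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		ssize_t ret = myfs_fetch_from_server(subreq->rreq->inode,
 *						     subreq->start, subreq->len,
 *						     &subreq->io_iter);
 *
 *		netfs_subreq_terminated(subreq, ret, false);
 *	}
 *
 *	const struct netfs_request_ops myfs_req_ops = {
 *		.init_request	= myfs_init_request,
 *		.issue_read	= myfs_issue_read,
 *	};
 */
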
/*
 * How to handle reading from a hole.
 */
enum netfs_read_from_hole {
	NETFS_READ_HOLE_IGNORE,
	NETFS_READ_HOLE_CLEAR,
	NETFS_READ_HOLE_FAIL,
};

/*
 * Table of operations for access to a cache.
 */
struct netfs_cache_ops {
	/* End an operation */
	void (*end_operation)(struct netfs_cache_resources *cres);

	/* Read data from the cache */
	int (*read)(struct netfs_cache_resources *cres,
		    loff_t start_pos,
		    struct iov_iter *iter,
		    enum netfs_read_from_hole read_hole,
		    netfs_io_terminated_t term_func,
		    void *term_func_priv);

	/* Write data to the cache */
	int (*write)(struct netfs_cache_resources *cres,
		     loff_t start_pos,
		     struct iov_iter *iter,
		     netfs_io_terminated_t term_func,
		     void *term_func_priv);

	/* Expand readahead request */
	void (*expand_readahead)(struct netfs_cache_resources *cres,
				 loff_t *_start, size_t *_len, loff_t i_size);

	/* Prepare a read operation, shortening it to a cached/uncached
	 * boundary as appropriate.
	 */
	enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
					     loff_t i_size);

	/* Prepare a write operation, working out what part of the write we can
	 * actually do.
	 */
	int (*prepare_write)(struct netfs_cache_resources *cres,
			     loff_t *_start, size_t *_len, size_t upper_len,
			     loff_t i_size, bool no_space_allocated_yet);

	/* Prepare an on-demand read operation, shortening it to a cached/uncached
	 * boundary as appropriate.
	 */
	enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
						      loff_t start, size_t *_len,
						      loff_t i_size,
						      unsigned long *_flags, ino_t ino);

	/* Query the occupancy of the cache in a region, returning where the
	 * next chunk of data starts and how long it is.
	 */
	int (*query_occupancy)(struct netfs_cache_resources *cres,
			       loff_t start, size_t len, size_t granularity,
			       loff_t *_data_start, size_t *_data_len);
};

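/*
 * Example (illustrative only): the general shape of a cache backend's
 * ->prepare_read(), which decides where a subrequest's data should come from.
 * "mycache" and mycache_has_data() are hypothetical; the real decision logic
 * (granule tracking, hole detection, clamping subreq->len) lives in the
 * cache backend.
 *
 *	static enum netfs_io_source mycache_prepare_read(struct netfs_io_subrequest *subreq,
 *							 loff_t i_size)
 *	{
 *		if (subreq->start >= i_size)
 *			return NETFS_FILL_WITH_ZEROES;
 *		if (mycache_has_data(subreq->rreq->cache_resources.cache_priv,
 *				     subreq->start, &subreq->len))
 *			return NETFS_READ_FROM_CACHE;
 *		return NETFS_DOWNLOAD_FROM_SERVER;
 *	}
 */
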
/* High-level read API. */
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);

/* High-level write API */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group);
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group);
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);

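/*
 * Example (illustrative only): a filesystem that defers fully to the netfs
 * library can point its file_operations straight at the high-level iterators
 * above.  "myfs" is hypothetical.
 *
 *	const struct file_operations myfs_file_ops = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= netfs_file_read_iter,
 *		.write_iter	= netfs_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */
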
/* Address operations API */
struct readahead_control;
void netfs_readahead(struct readahead_control *);
int netfs_read_folio(struct file *, struct folio *);
int netfs_write_begin(struct netfs_inode *, struct file *,
		      struct address_space *, loff_t pos, unsigned int len,
		      struct folio **, void **fsdata);
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc);
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool netfs_release_folio(struct folio *folio, gfp_t gfp);
int netfs_launder_folio(struct folio *folio);

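/*
 * Example (illustrative only): wiring the helpers above into an
 * address_space_operations table for a hypothetical "myfs".
 *
 *	const struct address_space_operations myfs_aops = {
 *		.readahead	= netfs_readahead,
 *		.read_folio	= netfs_read_folio,
 *		.writepages	= netfs_writepages,
 *		.dirty_folio	= netfs_dirty_folio,
 *		.release_folio	= netfs_release_folio,
 *		.invalidate_folio = netfs_invalidate_folio,
 *		.launder_folio	= netfs_launder_folio,
 *	};
 */
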
/* VMA operations API. */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);

/* (Sub)request management API. */
void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
			  bool was_async, enum netfs_sreq_ref_trace what);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
				struct iov_iter *new,
				iov_iter_extraction_t extraction_flags);
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
			size_t max_size, size_t max_segs);
struct netfs_io_subrequest *netfs_create_write_request(
	struct netfs_io_request *wreq, enum netfs_io_source dest,
	loff_t start, size_t len, work_func_t worker);
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
				       bool was_async);
void netfs_queue_write_request(struct netfs_io_subrequest *subreq);

int netfs_start_io_read(struct inode *inode);
void netfs_end_io_read(struct inode *inode);
int netfs_start_io_write(struct inode *inode);
void netfs_end_io_write(struct inode *inode);
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);

/**
 * netfs_inode - Get the netfs inode context from the inode
 * @inode: The inode to query
 *
 * Get the netfs lib inode context from the network filesystem's inode.  The
 * context struct is expected to directly follow on from the VFS inode struct.
 */
static inline struct netfs_inode *netfs_inode(struct inode *inode)
{
	return container_of(inode, struct netfs_inode, inode);
}

/**
 * netfs_inode_init - Initialise a netfslib inode context
 * @ctx: The netfs inode to initialise
 * @ops: The netfs's operations list
 * @use_zero_point: True to use the zero_point read optimisation
 *
 * Initialise the netfs library context struct.  This is expected to follow on
 * directly from the VFS inode struct.
 */
static inline void netfs_inode_init(struct netfs_inode *ctx,
				    const struct netfs_request_ops *ops,
				    bool use_zero_point)
{
	ctx->ops = ops;
	ctx->remote_i_size = i_size_read(&ctx->inode);
	ctx->zero_point = LLONG_MAX;
	ctx->flags = 0;
#if IS_ENABLED(CONFIG_FSCACHE)
	ctx->cache = NULL;
#endif
	/* ->releasepage() drives zero_point */
	if (use_zero_point) {
		ctx->zero_point = ctx->remote_i_size;
		mapping_set_release_always(ctx->inode.i_mapping);
	}
}

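/*
 * Example (illustrative only): a filesystem typically calls netfs_inode_init()
 * while setting up a freshly allocated inode, once the remote size is known.
 * The "myfs" names (including myfs_req_ops from the earlier example) are
 * hypothetical.
 *
 *	static void myfs_set_up_inode(struct inode *inode, loff_t remote_size)
 *	{
 *		struct myfs_inode *mi = container_of(inode, struct myfs_inode,
 *						     netfs.inode);
 *
 *		i_size_write(inode, remote_size);
 *		netfs_inode_init(&mi->netfs, &myfs_req_ops, true);
 *	}
 */
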
/**
 * netfs_resize_file - Note that a file got resized
 * @ctx: The netfs inode being resized
 * @new_i_size: The new file size
 * @changed_on_server: The change was applied to the server
 *
 * Inform the netfs lib that a file got resized so that it can adjust its state.
 */
static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
				     bool changed_on_server)
{
	if (changed_on_server)
		ctx->remote_i_size = new_i_size;
	if (new_i_size < ctx->zero_point)
		ctx->zero_point = new_i_size;
}

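/*
 * Example (illustrative only): after a truncate has been pushed to the
 * server, tell netfslib about the new size.  myfs_truncate_on_server() is
 * hypothetical.
 *
 *	static int myfs_truncate(struct inode *inode, loff_t new_size)
 *	{
 *		int ret = myfs_truncate_on_server(inode, new_size);
 *
 *		if (ret < 0)
 *			return ret;
 *		truncate_setsize(inode, new_size);
 *		netfs_resize_file(netfs_inode(inode), new_size, true);
 *		return 0;
 *	}
 */
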
/**
 * netfs_i_cookie - Get the cache cookie from the inode
 * @ctx: The netfs inode to query
 *
 * Get the caching cookie (if enabled) from the network filesystem's inode.
 */
static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	return ctx->cache;
#else
	return NULL;
#endif
}

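/*
 * Example (illustrative only): the cookie accessor is typically used when
 * pinning the cache object for an open file.  This assumes the fscache API's
 * fscache_use_cookie(), which tolerates a NULL cookie; "myfs" is hypothetical.
 *
 *	static int myfs_file_open(struct inode *inode, struct file *file)
 *	{
 *		fscache_use_cookie(netfs_i_cookie(netfs_inode(inode)),
 *				   file->f_mode & FMODE_WRITE);
 *		return generic_file_open(inode, file);
 *	}
 */
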
#endif /* _LINUX_NETFS_H */