// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/swap.h>
#include "internal.h"

/*
 * Append a folio to the rolling queue.
 */
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
			      bool needs_put)
{
	struct folio_queue *tail = rreq->buffer_tail;
	unsigned int slot, order = folio_order(folio);

	if (WARN_ON_ONCE(!rreq->buffer && tail) ||
	    WARN_ON_ONCE(rreq->buffer && !tail))
		return -EIO;

	if (!tail || folioq_full(tail)) {
		tail = kmalloc(sizeof(*tail), GFP_NOFS);
		if (!tail)
			return -ENOMEM;
		netfs_stat(&netfs_n_folioq);
		folioq_init(tail);
		tail->prev = rreq->buffer_tail;
		if (tail->prev)
			tail->prev->next = tail;
		rreq->buffer_tail = tail;
		if (!rreq->buffer) {
			rreq->buffer = tail;
			iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
		}
		rreq->buffer_tail_slot = 0;
	}

	rreq->io_iter.count += PAGE_SIZE << order;

	slot = folioq_append(tail, folio);
	/* Store the counter after setting the slot. */
	smp_store_release(&rreq->buffer_tail_slot, slot);
	return 0;
}
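
/* Illustrative sketch (not part of this file): the smp_store_release() above
 * publishes the new slot to lockless readers, which are expected to pair it
 * with an acquire load before inspecting the tail segment, e.g.:
 *
 *	unsigned int slot = smp_load_acquire(&rreq->buffer_tail_slot);
 *	struct folio *folio = folioq_folio(rreq->buffer_tail, slot);
 */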

/*
 * Delete the head of a rolling queue.
 */
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
{
	struct folio_queue *head = wreq->buffer, *next = head->next;

	if (next)
		next->prev = NULL;
	netfs_stat_d(&netfs_n_folioq);
	kfree(head);
	wreq->buffer = next;
	return next;
}

/*
 * Clear out a rolling queue.
 */
void netfs_clear_buffer(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

	while ((p = rreq->buffer)) {
		rreq->buffer = p->next;
		for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);
			if (!folio)
				continue;
			if (folioq_is_marked(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_put);
				folio_put(folio);
			}
		}
		netfs_stat_d(&netfs_n_folioq);
		kfree(p);
	}
}

/*
 * Reset the subrequest iterator to refer just to the region remaining to be
 * read.  The iterator may or may not have been advanced by socket ops or
 * extraction ops to an extent that may or may not match the amount actually
 * read.
 */
void netfs_reset_iter(struct netfs_io_subrequest *subreq)
{
	struct iov_iter *io_iter = &subreq->io_iter;
	size_t remain = subreq->len - subreq->transferred;

	if (io_iter->count > remain)
		iov_iter_advance(io_iter, io_iter->count - remain);
	else if (io_iter->count < remain)
		iov_iter_revert(io_iter, remain - io_iter->count);
	iov_iter_truncate(&subreq->io_iter, remain);
}
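
/* Worked example (illustrative): with subreq->len = 16384 and
 * subreq->transferred = 4096, remain is 12288.  If the iterator was consumed
 * too far and io_iter->count is 8192, it is reverted by 4096; if it was not
 * consumed at all and count is still 16384, it is advanced by 4096.  Either
 * way it is then truncated to exactly the 12288 bytes left to be read.
 */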

/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it.  This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
	bool need_use = false;

	_enter("");

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;

	if (!(inode->i_state & I_PINNING_NETFS_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);
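
/* Example (illustrative, not part of netfslib itself): a network filesystem
 * would normally wire this helper, together with the invalidate/release
 * helpers below, straight into its address_space operations.  The "myfs"
 * name is hypothetical:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= netfs_dirty_folio,
 *		.invalidate_folio	= netfs_invalidate_folio,
 *		.release_folio		= netfs_release_folio,
 *	};
 */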

/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio().  This is
 * intended to be called as/by the netfs's ->write_inode() method.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (wbc->unpinned_netfs_wb)
		fscache_unuse_cookie(cookie, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);
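
/* Example (illustrative only): because the signature matches
 * super_operations.write_inode, a filesystem can point that op straight at
 * this helper.  The "myfs" name is hypothetical:
 *
 *	static const struct super_operations myfs_super_ops = {
 *		.write_inode	= netfs_unpin_writeback,
 *	};
 */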

/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (inode->i_state & I_PINNING_NETFS_WB) {
		loff_t i_size = i_size_read(inode);
		fscache_unuse_cookie(cookie, aux, &i_size);
	}
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);
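
/* Example (illustrative only): a hypothetical ->evict_inode() would call this
 * after truncating the pagecache and before clear_inode(), passing any
 * auxiliary data that should be written back to the cache cookie (NULL here):
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		netfs_clear_inode_writeback(inode, NULL);
 *		clear_inode(inode);
 *	}
 */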

/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: Folio proposed for release
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem.  The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio->index, offset, length);

	if (offset == 0 && length == flen) {
		unsigned long long i_size = i_size_read(&ctx->inode);
		unsigned long long fpos = folio_pos(folio), end;

		end = umin(fpos + flen, i_size);
		if (fpos < i_size && end > ctx->zero_point)
			ctx->zero_point = end;
	}

	folio_wait_private_2(folio); /* [DEPRECATED] */

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write. */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int iend = offset + length;

		if (offset >= fend)
			return;
		if (iend <= fstart)
			return;

		/* The invalidation region overlaps the data.  If the region
		 * covers the start of the data, we either move along the start
		 * or just erase the data entirely.
		 */
		if (offset <= fstart) {
			if (iend >= fend)
				goto erase_completely;
			/* Move the start of the data. */
			finfo->dirty_len = fend - iend;
			finfo->dirty_offset = iend;
			return;
		}

		/* Reduce the length of the data if the invalidation region
		 * covers the tail part.
		 */
		if (iend >= fend) {
			finfo->dirty_len = offset - fstart;
			return;
		}

		/* A partial write was split.  The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
}
EXPORT_SYMBOL(netfs_invalidate_folio);
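
/* Worked example (illustrative): take a 4096-byte folio holding a streaming
 * write with dirty_offset = 1024 and dirty_len = 2048, i.e. bytes 1024-3071
 * are dirty.  Invalidating offset = 0, length = 2048 overlaps the start of
 * the data, so the surviving tail becomes the dirty region: bytes 2048-3071,
 * dirty_len = 1024.  Invalidating offset = 2048, length = 2048 instead covers
 * the tail, so dirty_len is trimmed to offset - fstart = 1024, leaving bytes
 * 1024-2047 dirty.
 */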

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 * Returns true if the folio can now be released, false if not.
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	unsigned long long end;

	if (folio_test_dirty(folio))
		return false;

	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
	if (end > ctx->zero_point)
		ctx->zero_point = end;

	if (folio_test_private(folio))
		return false;
	if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_private_2(folio);
	}
	fscache_note_page_release(netfs_i_cookie(ctx));
	return true;
}
EXPORT_SYMBOL(netfs_release_folio);
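
/* Example (illustrative only): a filesystem that needs extra bookkeeping can
 * wrap the helper instead of using it directly (hypothetical "myfs"):
 *
 *	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		if (!netfs_release_folio(folio, gfp))
 *			return false;
 *		...drop any myfs-private tracking for the folio here...
 *		return true;
 *	}
 */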