1 // SPDX-License-Identifier: GPL-2.0-or-later
/* Cache data I/O routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
7 #define FSCACHE_DEBUG_LEVEL OPERATION
8 #include <linux/fscache-cache.h>
10 #include <linux/bvec.h>
11 #include <linux/slab.h>
12 #include <linux/uio.h>
/**
 * fscache_wait_for_operation - Wait for an object become accessible
 * @cres: The cache resources for the operation being performed
 * @want_state: The minimum state the object must be at
 *
 * See if the target cache object is at the specified minimum state of
 * accessibility yet, and if not, wait for it.
 */
bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	enum fscache_cookie_state state;

	/* A dead cache can't service any operation. */
	if (!fscache_cache_is_live(cookie->volume->cache)) {

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	case FSCACHE_COOKIE_STATE_CREATING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
		/* Transient states: sleep until the cookie moves on to some
		 * other state, then re-evaluate.
		 */
		wait_var_event(&cookie->state,
			       fscache_cookie_state(cookie) != state);
	case FSCACHE_COOKIE_STATE_ACTIVE:
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
		_leave(" [not live]");

	/* NOTE(review): presumably only start a backend operation if one
	 * hasn't already been attached to these resources (cache_priv2
	 * unset) -- interior lines appear elided here; confirm against the
	 * full file.
	 */
	if (!cres->cache_priv2)
		return cookie->volume->cache->ops->begin_operation(cres, want_state);
EXPORT_SYMBOL(fscache_wait_for_operation);
/*
 * Begin an I/O operation on the cache, waiting till we reach the right state.
 *
 * Attaches the resources required to the operation resources record.
 */
static int fscache_begin_operation(struct netfs_cache_resources *cres,
				   struct fscache_cookie *cookie,
				   enum fscache_want_state want_state,
				   enum fscache_access_trace why)
	enum fscache_cookie_state state;
	bool once_only = false;

	/* Bind the operation record to the cookie it operates on. */
	cres->cache_priv = cookie;
	cres->cache_priv2 = NULL;
	cres->debug_id = cookie->debug_id;
	cres->inval_counter = cookie->inval_counter;

	/* Count this access so the cookie can't be withdrawn under us. */
	if (!fscache_begin_cookie_access(cookie, why))

	spin_lock(&cookie->lock);

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_CREATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_ACTIVE:
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
		/* Using a cookie in these states is a caller bug. */
		WARN(1, "Can't use cookie in state %u\n", cookie->state);

	spin_unlock(&cookie->lock);
	if (!cookie->volume->cache->ops->begin_operation(cres, want_state))

wait_for_file_wrangling:
	spin_unlock(&cookie->lock);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     atomic_read(&cookie->n_accesses),
			     fscache_access_io_wait);
	/* Bounded wait so a stuck cookie state machine gets reported rather
	 * than hanging the caller forever; warn once per wait loop.
	 */
	timeo = wait_var_event_timeout(&cookie->state,
				       fscache_cookie_state(cookie) != state, 20 * HZ);
	if (timeo <= 1 && !once_only) {
		pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u",
			__func__, fscache_cookie_state(cookie), state);
		fscache_print_cookie(cookie, 'O');

	spin_unlock(&cookie->lock);

	/* Failure path: detach the cookie and drop the access count taken
	 * by fscache_begin_cookie_access() above.
	 */
	cres->cache_priv = NULL;

	fscache_end_cookie_access(cookie, fscache_access_io_not_live);
	_leave(" = -ENOBUFS");
145 int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
146 struct fscache_cookie *cookie)
148 return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
149 fscache_access_io_read);
151 EXPORT_SYMBOL(__fscache_begin_read_operation);
/**
 * fscache_set_page_dirty - Mark page dirty and pin a cache object for writeback
 * @page: The page being dirtied
 * @cookie: The cookie referring to the cache object
 *
 * Set the dirty flag on a page and pin an in-use cache object in memory when
 * dirtying a page so that writeback can later write to it. This is intended
 * to be called from the filesystem's ->set_page_dirty() method.
 *
 * Returns 1 if PG_dirty was set on the page, 0 otherwise.
 */
int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie)
	struct inode *inode = page->mapping->host;
	bool need_use = false;

	if (!__set_page_dirty_nobuffers(page))
	if (!fscache_cookie_valid(cookie))

	/* Double-checked flag test: skip taking i_lock entirely when the
	 * inode is already pinning the cache for writeback.
	 */
	if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
			inode->i_state |= I_PINNING_FSCACHE_WB;
		spin_unlock(&inode->i_lock);

	/* NOTE(review): presumably guarded by need_use -- the guard line
	 * appears elided here; confirm against the full file.
	 */
	fscache_use_cookie(cookie, true);
EXPORT_SYMBOL(fscache_set_page_dirty);
/*
 * State carried across an asynchronous write to the cache; handed to the
 * completion routine as its private pointer.
 * NOTE(review): the fields describing the byte range being written (start,
 * len, set_bits) appear elided from this view -- they are referenced by
 * fscache_wreq_done() and __fscache_write_to_cache() below.
 */
struct fscache_write_request {
	struct netfs_cache_resources cache_resources;	/* Cache I/O state for this write */
	struct address_space *mapping;		/* The pagecache mapping being written from */
	netfs_io_terminated_t term_func;	/* Caller's completion callback (may be NULL) */
	void *term_func_priv;			/* Opaque data passed to term_func */
/**
 * __fscache_clear_page_bits - End PG_fscache on the pages in a byte range
 * @mapping: The pagecache mapping holding the pages
 * @start: Byte offset of the start of the region
 * @len: Length of the region in bytes
 *
 * Walk the pages covering the given byte range and clear the fscache flag
 * on each, waking anyone waiting on it.
 */
void __fscache_clear_page_bits(struct address_space *mapping,
			       loff_t start, size_t len)
	pgoff_t first = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE;	/* inclusive last page index */

	XA_STATE(xas, &mapping->i_pages, first);

	xas_for_each(&xas, page, last) {
		end_page_fscache(page);
EXPORT_SYMBOL(__fscache_clear_page_bits);
/*
 * Deal with the completion of writing the data to the cache.
 */
static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
	struct fscache_write_request *wreq = priv;

	/* Clear PG_fscache over the written region, waking waiters. */
	fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
				wreq->mapping, wreq->start, wreq->len,

	/* Propagate the byte count or error to the netfs's handler. */
	wreq->term_func(wreq->term_func_priv, transferred_or_error,

	/* Release the cache resources taken when the write was begun. */
	fscache_end_operation(&wreq->cache_resources);
/*
 * Write a region of pagecache data to the cache object backing @cookie,
 * completing asynchronously through fscache_wreq_done().
 */
void __fscache_write_to_cache(struct fscache_cookie *cookie,
			      struct address_space *mapping,
			      loff_t start, size_t len, loff_t i_size,
			      netfs_io_terminated_t term_func,
			      void *term_func_priv,
	struct fscache_write_request *wreq;
	struct netfs_cache_resources *cres;
	struct iov_iter iter;

	_enter("%llx,%zx", start, len);

	/* GFP_NOFS: this can be reached from writeback, so allocation must
	 * not recurse into filesystem reclaim.
	 */
	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
	wreq->mapping = mapping;
	wreq->set_bits = cond;
	wreq->term_func = term_func;
	wreq->term_func_priv = term_func_priv;

	cres = &wreq->cache_resources;
	if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
				    fscache_access_io_write) < 0)

	/* Let the cache backend adjust the range to what it can store. */
	ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);

	/* TODO: Consider clearing page bits now for space the write isn't
	 * covering. This is more complicated than it appears when THPs are
	 * taken into account.
	 */
	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
	fscache_write(cres, start, &iter, fscache_wreq_done, wreq);

	/* Error after the operation began: reuse the completion handler to
	 * unwind (clear page bits, call term_func, end the operation).
	 */
	return fscache_wreq_done(wreq, ret, false);

	/* Error before the operation began: unwind by hand. */
	fscache_clear_page_bits(cookie, mapping, start, len, cond);
	term_func(term_func_priv, ret, false);
EXPORT_SYMBOL(__fscache_write_to_cache);
/*
 * Change the size of a backing object.
 */
void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
	struct netfs_cache_resources cres;

	trace_fscache_resize(cookie, new_size);
	if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
				    fscache_access_io_resize) == 0) {
		fscache_stat(&fscache_n_resizes);
		set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);

		/* We cannot defer a resize as we need to do it inside the
		 * netfs's inode lock so that we're serialised with respect to
		 * writes.
		 */
		cookie->volume->cache->ops->resize_cookie(&cres, new_size);
		fscache_end_operation(&cres);

		/* No live cache operation: just count the null resize. */
		fscache_stat(&fscache_n_resizes_null);
EXPORT_SYMBOL(__fscache_resize_cookie);