Commit | Line | Data |
---|---|---|
3d3c9504 DH |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* Network filesystem high-level read support. | |
3 | * | |
4 | * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. | |
5 | * Written by David Howells (dhowells@redhat.com) | |
6 | */ | |
7 | ||
8 | #include <linux/module.h> | |
9 | #include <linux/export.h> | |
10 | #include <linux/fs.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/pagemap.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/uio.h> | |
15 | #include <linux/sched/mm.h> | |
16 | #include <linux/task_io_accounting_ops.h> | |
3d3c9504 | 17 | #include "internal.h" |
3d3c9504 | 18 | |
/*
 * Clear the unread part of an I/O request.
 *
 * Zero the portion of the pagecache buffer covered by @subreq that was not
 * filled by the read (i.e. everything from start + transferred to the end of
 * the subrequest).  Used when NETFS_SREQ_CLEAR_TAIL is set on a short read.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	struct iov_iter iter;

	/* Build an iterator over the unfilled tail of the subrequest's span
	 * in the inode's pagecache xarray, then zero it.
	 */
	iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}
31 | ||
726218fd DH |
32 | static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, |
33 | bool was_async) | |
34 | { | |
6a19114b | 35 | struct netfs_io_subrequest *subreq = priv; |
726218fd DH |
36 | |
37 | netfs_subreq_terminated(subreq, transferred_or_error, was_async); | |
38 | } | |
39 | ||
40 | /* | |
41 | * Issue a read against the cache. | |
42 | * - Eats the caller's ref on subreq. | |
43 | */ | |
6a19114b DH |
44 | static void netfs_read_from_cache(struct netfs_io_request *rreq, |
45 | struct netfs_io_subrequest *subreq, | |
3a11b3a8 | 46 | enum netfs_read_from_hole read_hole) |
726218fd DH |
47 | { |
48 | struct netfs_cache_resources *cres = &rreq->cache_resources; | |
49 | struct iov_iter iter; | |
50 | ||
51 | netfs_stat(&netfs_n_rh_read); | |
de4eda9d | 52 | iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, |
726218fd DH |
53 | subreq->start + subreq->transferred, |
54 | subreq->len - subreq->transferred); | |
55 | ||
3a11b3a8 | 56 | cres->ops->read(cres, subreq->start, &iter, read_hole, |
726218fd DH |
57 | netfs_cache_read_terminated, subreq); |
58 | } | |
59 | ||
/*
 * Fill a subrequest region with zeroes.
 *
 * Used when the region lies entirely beyond the server-side EOF: rather than
 * issuing I/O, mark the whole buffer for clearing and terminate immediately
 * with zero bytes transferred.  The termination path then zeroes the buffer
 * via netfs_clear_unread() because NETFS_SREQ_CLEAR_TAIL is set.
 */
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}
70 | ||
/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->pos + subreq->transferred to
 * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
 * buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_io_request *rreq,
				   struct netfs_io_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);
	/* The filesystem's issue_read op performs the actual transfer and
	 * calls netfs_subreq_terminated() when done.
	 */
	rreq->netfs_ops->issue_read(subreq);
}
93 | ||
94 | /* | |
95 | * Release those waiting. | |
96 | */ | |
6a19114b | 97 | static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async) |
3d3c9504 | 98 | { |
77b4d2c6 | 99 | trace_netfs_rreq(rreq, netfs_rreq_trace_done); |
f18a3785 | 100 | netfs_clear_subrequests(rreq, was_async); |
de74023b | 101 | netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete); |
3d3c9504 DH |
102 | } |
103 | ||
/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the folios involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
					  bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	/* RCU protects the xarray walk below; the folios themselves are
	 * pinned by the PG_fscache marks we are about to clear.
	 */
	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			if (xas_retry(&xas, folio))
				continue;

			/* We might have multiple writes from the same huge
			 * folio, but we mustn't unlock a folio more than once.
			 */
			if (have_unlocked && folio_index(folio) <= unlocked)
				continue;
			unlocked = folio_index(folio);
			folio_end_fscache(folio);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	/* Consumes the ref we inherited. */
	netfs_rreq_completed(rreq, was_async);
}
141 | ||
/*
 * Completion callback for a single copy-to-cache write.  Records stats and
 * tracing for the outcome, and if this was the last outstanding copy op,
 * clears the folio fscache marks and completes the request.  Drops the
 * subrequest ref taken when the write was issued.
 */
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		/* A failed cache write is not fatal to the read; just count
		 * and trace it.
		 */
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}
164 | ||
/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 *
 * Subrequests not marked for copying are discarded; adjacent marked ones are
 * merged so the cache sees fewer, larger writes.  Each write completes via
 * netfs_rreq_copy_terminated().
 */
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_io_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_copy_ops);

	/* First pass: drop every subrequest that doesn't need copying. */
	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false,
					     netfs_sreq_trace_put_no_copy);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		/* Amalgamate adjacent writes */
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false,
					     netfs_sreq_trace_put_merged);
		}

		/* Let the cache adjust start/len to its granularity; skip the
		 * write if it declines.
		 */
		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       rreq->i_size, true);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		/* One count and one subreq ref per in-flight write. */
		atomic_inc(&rreq->nr_copy_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}
226 | ||
227 | static void netfs_rreq_write_to_cache_work(struct work_struct *work) | |
228 | { | |
6a19114b DH |
229 | struct netfs_io_request *rreq = |
230 | container_of(work, struct netfs_io_request, work); | |
726218fd DH |
231 | |
232 | netfs_rreq_do_write_to_cache(rreq); | |
233 | } | |
234 | ||
/*
 * Punt the copy-to-cache work to a worker thread so it can sleep.  The
 * request's work item is repurposed here; it must not be queued at this
 * point, so a failure to queue indicates a logic error.
 */
static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
	rreq->work.func = netfs_rreq_write_to_cache_work;
	if (!queue_work(system_unbound_wq, &rreq->work))
		BUG();
}
241 | ||
/*
 * Handle a short read.
 *
 * Reissue the tail of a subrequest that transferred some but not all of its
 * data.  Takes a new ref on the subrequest and bumps nr_outstanding for the
 * reissued I/O.  A cache reread is told to clear holes since seeking data a
 * second time may not make progress.
 */
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
				  struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
	else
		netfs_read_from_server(rreq, subreq);
}
261 | ||
262 | /* | |
263 | * Resubmit any short or failed operations. Returns true if we got the rreq | |
264 | * ref back. | |
265 | */ | |
6a19114b | 266 | static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq) |
3d3c9504 | 267 | { |
6a19114b | 268 | struct netfs_io_subrequest *subreq; |
3d3c9504 DH |
269 | |
270 | WARN_ON(in_interrupt()); | |
271 | ||
77b4d2c6 DH |
272 | trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit); |
273 | ||
3d3c9504 DH |
274 | /* We don't want terminating submissions trying to wake us up whilst |
275 | * we're still going through the list. | |
276 | */ | |
6a19114b | 277 | atomic_inc(&rreq->nr_outstanding); |
3d3c9504 DH |
278 | |
279 | __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); | |
280 | list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { | |
281 | if (subreq->error) { | |
282 | if (subreq->source != NETFS_READ_FROM_CACHE) | |
283 | break; | |
284 | subreq->source = NETFS_DOWNLOAD_FROM_SERVER; | |
285 | subreq->error = 0; | |
289af54c | 286 | netfs_stat(&netfs_n_rh_download_instead); |
77b4d2c6 | 287 | trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead); |
6cd3d6fd | 288 | netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); |
6a19114b | 289 | atomic_inc(&rreq->nr_outstanding); |
3d3c9504 | 290 | netfs_read_from_server(rreq, subreq); |
f18a3785 | 291 | } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) { |
3d3c9504 DH |
292 | netfs_rreq_short_read(rreq, subreq); |
293 | } | |
294 | } | |
295 | ||
6a19114b DH |
296 | /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */ |
297 | if (atomic_dec_and_test(&rreq->nr_outstanding)) | |
3d3c9504 DH |
298 | return true; |
299 | ||
6a19114b | 300 | wake_up_var(&rreq->nr_outstanding); |
3d3c9504 DH |
301 | return false; |
302 | } | |
303 | ||
/*
 * Check to see if the data read is still valid.
 *
 * Ask the filesystem (if it provides is_still_valid) whether cached data can
 * be trusted; if not, mark every cache-sourced subrequest stale so it gets
 * resubmitted to the server.
 */
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}
322 | ||
/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	/* Resubmit incomplete I/O unless the whole request already failed;
	 * loop back if the resubmission pass handed the ref back to us.
	 */
	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	netfs_rreq_unlock_folios(rreq);

	/* The read is done as far as waiters are concerned; wake them. */
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	/* Copying to the cache (if needed) takes over our ref. */
	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq);

	netfs_rreq_completed(rreq, was_async);
}
353 | ||
4090b314 | 354 | static void netfs_rreq_work(struct work_struct *work) |
3d3c9504 | 355 | { |
6a19114b DH |
356 | struct netfs_io_request *rreq = |
357 | container_of(work, struct netfs_io_request, work); | |
3d3c9504 DH |
358 | netfs_rreq_assess(rreq, false); |
359 | } | |
360 | ||
/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 *
 * If resubmission might be needed and we're in an async (possibly atomic)
 * context, punt the assessment to a workqueue since resubmitting may sleep;
 * otherwise assess inline.
 */
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}
376 | ||
/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried or a negative
 * error code.  The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;
	int u;

	_enter("[%u]{%llx,%lx},%zd",
	       subreq->debug_index, subreq->start, subreq->flags,
	       transferred_or_error);

	/* Per-source completion accounting. */
	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	/* Clamp an overreport from a buggy backend rather than overrunning
	 * our accounting of the buffer.
	 */
	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	/* Propagate the copy-to-cache request up to the whole rreq. */
	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_outstanding);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_outstanding);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		/* Short read with clear-tail: zero the rest and treat it as
		 * complete.
		 */
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		/* Two consecutive zero-progress completions means retrying
		 * won't help; give up with -ENODATA.
		 */
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	/* Mark for resubmission by the assessment phase. */
	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		/* A cache failure isn't fatal: the server can be tried. */
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);
485 | ||
/*
 * Let the cache decide where the next subrequest should be read from.  If no
 * cache resources are attached, reads beyond the known file size are filled
 * with zeroes and everything else goes to the server.
 */
static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
						     loff_t i_size)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}
498 | ||
/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
			struct netfs_io_subrequest *subreq)
{
	enum netfs_io_source source;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	source = netfs_cache_prepare_read(subreq, rreq->i_size);
	if (source == NETFS_INVALID_READ)
		goto out;

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}
	}

	/* A zero-length slice would mean no forward progress. */
	if (WARN_ON(subreq->len == 0))
		source = NETFS_INVALID_READ;

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}
539 | ||
/*
 * Slice off a piece of a read request and submit an I/O request for it.
 *
 * Returns true if a slice was successfully submitted (it initially covers
 * everything not yet submitted; the cache/netfs may trim it), false on
 * allocation failure or an invalid read, in which case rreq->error is set.
 */
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
				    unsigned int *_debug_index)
{
	struct netfs_io_subrequest *subreq;
	enum netfs_io_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->debug_index	= (*_debug_index)++;
	subreq->start		= rreq->start + rreq->submitted;
	subreq->len		= rreq->len   - rreq->submitted;

	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset.  It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then when we hand the subset, it can choose to take a subset of that
	 * (the starts must coincide), in which case, we go around the loop
	 * again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_outstanding);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
	return false;
}
598 | ||
/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 *
 * @rreq: The prepared read request (consumed: a ref is dropped on all paths).
 * @sync: If true, wait for all I/O to complete and return the final error;
 *	  if false, completion is handled asynchronously and 0 is returned.
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	unsigned int debug_index = 0;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
		return -EIO;
	}

	INIT_WORK(&rreq->work, netfs_rreq_work);

	/* For a synchronous read, hold an extra ref so the request survives
	 * until we've collected the result below.
	 */
	if (sync)
		netfs_get_request(rreq, netfs_rreq_trace_get_hold);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always belongs to
		 * us, and the service code isn't punted off to a random thread pool to
		 * process.
		 */
		for (;;) {
			wait_var_event(&rreq->nr_outstanding,
				       atomic_read(&rreq->nr_outstanding) == 1);
			netfs_rreq_assess(rreq, false);
			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
				break;
			cond_resched();
		}

		ret = rreq->error;
		/* Submission stopping early with no error recorded still
		 * means the read couldn't be completed.
		 */
		if (ret == 0 && rreq->submitted < rreq->len) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = 0;
	}
	return ret;
}