netfs: Provide func to copy data to pagecache for buffered write
[linux-2.6-block.git] / fs/netfs/io.c
index e374767d1b6832745e9c59249d426c331669f95b..774aef6ea4cbcd4a4539d70c12e05ad3c1ae0934 100644
  */
 static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
 {
-       struct iov_iter iter;
-
-       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-       iov_iter_zero(iov_iter_count(&iter), &iter);
+       iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
 }
 
 static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
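A side note on the pattern being replaced (illustrative sketch only; the standalone helper names are hypothetical, not code from this patch): before the change, each call built a fresh xarray-backed iterator over the pagecache; afterwards, the subrequest's preset io_iter already describes the unread span.

/* Hypothetical before/after sketch -- not part of the patch. */
static void zero_unread_old(struct address_space *mapping, loff_t start, size_t len)
{
	struct iov_iter iter;

	/* Pre-patch style: construct an iterator over the pagecache on every
	 * call (READ is the older spelling of ITER_DEST).
	 */
	iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, start, len);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}

static void zero_unread_new(struct netfs_io_subrequest *subreq)
{
	/* Post-patch style: the iterator was set up once when the subrequest
	 * was sliced and already covers exactly the unread part of the buffer.
	 */
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
}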
@@ -46,14 +41,9 @@ static void netfs_read_from_cache(struct netfs_io_request *rreq,
                                  enum netfs_read_from_hole read_hole)
 {
        struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct iov_iter iter;
 
        netfs_stat(&netfs_n_rh_read);
-       iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-
-       cres->ops->read(cres, subreq->start, &iter, read_hole,
+       cres->ops->read(cres, subreq->start, &subreq->io_iter, read_hole,
                        netfs_cache_read_terminated, subreq);
 }
 
@@ -88,6 +78,11 @@ static void netfs_read_from_server(struct netfs_io_request *rreq,
                                   struct netfs_io_subrequest *subreq)
 {
        netfs_stat(&netfs_n_rh_download);
+       if (iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
+               pr_warn("R=%08x[%u] ITER PRE-MISMATCH %zx != %zx-%zx %lx\n",
+                       rreq->debug_id, subreq->debug_index,
+                       iov_iter_count(&subreq->io_iter), subreq->len,
+                       subreq->transferred, subreq->flags);
        rreq->netfs_ops->issue_read(subreq);
 }
 
@@ -130,6 +125,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
                        if (have_unlocked && folio_index(folio) <= unlocked)
                                continue;
                        unlocked = folio_index(folio);
+                       trace_netfs_folio(folio, netfs_folio_trace_end_copy);
                        folio_end_fscache(folio);
                        have_unlocked = true;
                }
@@ -208,7 +204,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
                        continue;
                }
 
-               iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
+               iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
                                subreq->start, subreq->len);
 
                atomic_inc(&rreq->nr_copy_ops);
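The READ/WRITE direction arguments to the iov_iter constructors were renamed to ITER_DEST/ITER_SOURCE, which is why this hunk flips the spelling without changing behaviour. A minimal hedged sketch of the distinction (not patch code):

/* Sketch only: ITER_DEST marks a buffer that data will be read *into*;
 * ITER_SOURCE marks a buffer that data will be copied *from* -- here the
 * pagecache pages being written out to the cache.
 */
iov_iter_xarray(&iter, ITER_DEST,   &mapping->i_pages, pos, len);  /* fill pages  */
iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, pos, len);  /* drain pages */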
@@ -259,6 +255,30 @@ static void netfs_rreq_short_read(struct netfs_io_request *rreq,
                netfs_read_from_server(rreq, subreq);
 }
 
+/*
+ * Reset the subrequest iterator prior to resubmission.
+ */
+static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
+                                   struct netfs_io_subrequest *subreq)
+{
+       size_t remaining = subreq->len - subreq->transferred;
+       size_t count = iov_iter_count(&subreq->io_iter);
+
+       if (count == remaining)
+               return;
+
+       _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+              rreq->debug_id, subreq->debug_index,
+              iov_iter_count(&subreq->io_iter), subreq->transferred,
+              subreq->len, rreq->i_size,
+              subreq->io_iter.iter_type);
+
+       if (count < remaining)
+               iov_iter_revert(&subreq->io_iter, remaining - count);
+       else
+               iov_iter_advance(&subreq->io_iter, count - remaining);
+}
+
 /*
  * Resubmit any short or failed operations.  Returns true if we got the rreq
  * ref back.
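A worked example of the reset arithmetic in netfs_reset_subreq_iter() (purely illustrative, not part of the patch):

/* Suppose subreq->len = 0x8000 and subreq->transferred = 0x3000, so
 * remaining = 0x5000.
 *
 * If the failed attempt consumed the whole iterator, count == 0, which is
 * below remaining, so the iterator is wound back:
 *
 *	iov_iter_revert(&subreq->io_iter, 0x5000 - 0);
 *
 * If it consumed nothing, count == 0x8000, which exceeds remaining, so the
 * already-transferred span is skipped over:
 *
 *	iov_iter_advance(&subreq->io_iter, 0x8000 - 0x5000);
 *
 * Either way the iterator ends up covering exactly the untransferred tail
 * before the subrequest is reissued.
 */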
@@ -287,6 +307,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
                        trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
                        netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
                        atomic_inc(&rreq->nr_outstanding);
+                       netfs_reset_subreq_iter(rreq, subreq);
                        netfs_read_from_server(rreq, subreq);
                } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
                        netfs_rreq_short_read(rreq, subreq);
@@ -342,6 +363,7 @@ again:
 
        netfs_rreq_unlock_folios(rreq);
 
+       trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
@@ -399,9 +421,9 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
        struct netfs_io_request *rreq = subreq->rreq;
        int u;
 
-       _enter("[%u]{%llx,%lx},%zd",
-              subreq->debug_index, subreq->start, subreq->flags,
-              transferred_or_error);
+       _enter("R=%x[%x]{%llx,%lx},%zd",
+              rreq->debug_id, subreq->debug_index,
+              subreq->start, subreq->flags, transferred_or_error);
 
        switch (subreq->source) {
        case NETFS_READ_FROM_CACHE:
@@ -501,9 +523,11 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest
  */
 static enum netfs_io_source
 netfs_rreq_prepare_read(struct netfs_io_request *rreq,
-                       struct netfs_io_subrequest *subreq)
+                       struct netfs_io_subrequest *subreq,
+                       struct iov_iter *io_iter)
 {
        enum netfs_io_source source;
+       size_t lsize;
 
        _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
 
@@ -526,11 +550,33 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
                        source = NETFS_INVALID_READ;
                        goto out;
                }
+
+               if (subreq->max_nr_segs) {
+                       lsize = netfs_limit_iter(io_iter, 0, subreq->len,
+                                                subreq->max_nr_segs);
+                       if (subreq->len > lsize) {
+                               subreq->len = lsize;
+                               trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
+                       }
+               }
        }
 
-       if (WARN_ON(subreq->len == 0))
+       if (subreq->len > rreq->len)
+               pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n",
+                       rreq->debug_id, subreq->debug_index,
+                       subreq->len, rreq->len);
+
+       if (WARN_ON(subreq->len == 0)) {
                source = NETFS_INVALID_READ;
+               goto out;
+       }
+
+       subreq->source = source;
+       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
 
+       subreq->io_iter = *io_iter;
+       iov_iter_truncate(&subreq->io_iter, subreq->len);
+       iov_iter_advance(io_iter, subreq->len);
 out:
        subreq->source = source;
        trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
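The slicing above can be read as: copy the request-level iterator into the subrequest, clip the copy to this slice, then step the shared iterator past it so the next subrequest starts where this one ends. A worked example with assumed sizes (illustrative only):

/* Suppose io_iter still covers 0x30000 bytes and subreq->len = 0x10000.
 *
 *	subreq->io_iter = *io_iter;	-> private copy, 0x30000 visible
 *	iov_iter_truncate(&subreq->io_iter, 0x10000);
 *					-> the subrequest sees only its slice
 *	iov_iter_advance(io_iter, 0x10000);
 *					-> shared iterator: 0x20000 left,
 *					   positioned for the next slice
 */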
@@ -541,6 +587,7 @@ out:
  * Slice off a piece of a read request and submit an I/O request for it.
  */
 static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
+                                   struct iov_iter *io_iter,
                                    unsigned int *_debug_index)
 {
        struct netfs_io_subrequest *subreq;
@@ -565,7 +612,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
         * (the starts must coincide), in which case, we go around the loop
         * again and ask it to download the next piece.
         */
-       source = netfs_rreq_prepare_read(rreq, subreq);
+       source = netfs_rreq_prepare_read(rreq, subreq, io_iter);
        if (source == NETFS_INVALID_READ)
                goto subreq_failed;
 
@@ -603,6 +650,7 @@ subreq_failed:
  */
 int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 {
+       struct iov_iter io_iter;
        unsigned int debug_index = 0;
        int ret;
 
@@ -611,45 +659,45 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 
        if (rreq->len == 0) {
                pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
-               netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
                return -EIO;
        }
 
-       INIT_WORK(&rreq->work, netfs_rreq_work);
+       rreq->io_iter = rreq->iter;
 
-       if (sync)
-               netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+       INIT_WORK(&rreq->work, netfs_rreq_work);
 
        /* Chop the read into slices according to what the cache and the netfs
         * want and submit each one.
         */
+       netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
        atomic_set(&rreq->nr_outstanding, 1);
+       io_iter = rreq->io_iter;
        do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
+               if (!netfs_rreq_submit_slice(rreq, &io_iter, &debug_index))
                        break;
 
        } while (rreq->submitted < rreq->len);
 
        if (sync) {
-               /* Keep nr_outstanding incremented so that the ref always belongs to
-                * us, and the service code isn't punted off to a random thread pool to
-                * process.
+               /* Keep nr_outstanding incremented so that the ref always
+                * belongs to us, and the service code isn't punted off to a
+                * random thread pool to process.  Note that this might start
+                * further work, such as writing to the cache.
                 */
-               for (;;) {
-                       wait_var_event(&rreq->nr_outstanding,
-                                      atomic_read(&rreq->nr_outstanding) == 1);
+               wait_var_event(&rreq->nr_outstanding,
+                              atomic_read(&rreq->nr_outstanding) == 1);
+               if (atomic_dec_and_test(&rreq->nr_outstanding))
                        netfs_rreq_assess(rreq, false);
-                       if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                               break;
-                       cond_resched();
-               }
+
+               trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+               wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
+                           TASK_UNINTERRUPTIBLE);
 
                ret = rreq->error;
                if (ret == 0 && rreq->submitted < rreq->len) {
                        trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
                        ret = -EIO;
                }
-               netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
        } else {
                /* If we decrement nr_outstanding to 0, the ref belongs to us. */
                if (atomic_dec_and_test(&rreq->nr_outstanding))