xprtrdma: Fix a maybe-uninitialized compiler warning
author Benjamin Coddington <bcodding@redhat.com>
Tue, 2 Nov 2021 18:48:59 +0000 (14:48 -0400)
committer Trond Myklebust <trond.myklebust@hammerspace.com>
Tue, 2 Nov 2021 20:06:33 +0000 (16:06 -0400)
This minor fix-up keeps GCC from complaining that "'last' may be used
uninitialized", a warning which breaks some build workflows that run
with all warnings treated as errors.

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
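
For illustration, a minimal standalone sketch of the pattern follows; it is
not the kernel code, and the struct, the pop() helper, and the function names
are invented for the example. GCC cannot prove that a plain while loop body
runs at least once, so a variable assigned only inside the body can look
uninitialized at a later use; rewriting the loop as do-while makes the first
assignment unconditional, under the assumption (as in frwr_unmap_sync()) that
the caller supplies a non-empty list.

#include <stddef.h>

struct node {
	struct node *next;
	int value;
};

/* Illustrative stand-in for rpcrdma_mr_pop(): detach and return the
 * head of the list, or NULL when the list is empty.
 */
static struct node *pop(struct node **list)
{
	struct node *n = *list;

	if (n)
		*list = n->next;
	return n;
}

/* Before: the body might never run, so GCC may emit
 * "'last' may be used uninitialized" for the return statement.
 */
int last_value_before(struct node **list)
{
	struct node *n, *last;

	while ((n = pop(list)))
		last = n;
	return last->value;
}

/* After: the first assignment to "last" is unconditional, which is
 * valid because the caller guarantees the list is non-empty.
 */
int last_value_after(struct node **list)
{
	struct node *n, *last;

	n = pop(list);
	do {
		last = n;
	} while ((n = pop(list)));
	return last->value;
}
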
net/sunrpc/xprtrdma/frwr_ops.c

index 3eccf365fcb8c855c0ae4d142185f7ec63bcb854..ff699307e8200e25d94cb95b2d83492739b2bce4 100644
@@ -515,8 +515,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
         * a single ib_post_send() call.
         */
        prev = &first;
-       while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
-
+       mr = rpcrdma_mr_pop(&req->rl_registered);
+       do {
                trace_xprtrdma_mr_localinv(mr);
                r_xprt->rx_stats.local_inv_needed++;
 
@@ -533,7 +533,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
                *prev = last;
                prev = &last->next;
-       }
+       } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
+
        mr = container_of(last, struct rpcrdma_mr, mr_invwr);
 
        /* Strong send queue ordering guarantees that when the
@@ -617,8 +618,8 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
         * a single ib_post_send() call.
         */
        prev = &first;
-       while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
-
+       mr = rpcrdma_mr_pop(&req->rl_registered);
+       do {
                trace_xprtrdma_mr_localinv(mr);
                r_xprt->rx_stats.local_inv_needed++;
 
@@ -635,7 +636,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
                *prev = last;
                prev = &last->next;
-       }
+       } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
 
        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain