NFSv4: Don't add a new lock on an interrupted wait for LOCK
author Benjamin Coddington <bcodding@redhat.com>
Thu, 3 May 2018 11:12:57 +0000 (07:12 -0400)
committer Trond Myklebust <trond.myklebust@hammerspace.com>
Thu, 31 May 2018 19:02:16 +0000 (15:02 -0400)
If the wait for a LOCK operation is interrupted, and then the file is
closed, the locks cleanup code will assume that no new locks will be added
to the inode after it has completed.  We already have a mechanism to detect
if there was a signal, so let's use that to avoid recreating the local lock
once the RPC completes.  Also skip re-sending the LOCK operation for the
various error cases if we were signaled.

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
[Trond: Fix inverted test of locks_lock_inode_wait()]
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
fs/nfs/nfs4proc.c

index a0f16c8c5ebd80c688207baefb09405ae636a33f..9945b36ea863127d3ce9a7e16efff6da041558a0 100644
@@ -6417,32 +6417,36 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
        case 0:
                renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
                                data->timestamp);
-               if (data->arg.new_lock) {
+               if (data->arg.new_lock && !data->cancelled) {
                        data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
-                       if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) {
-                               rpc_restart_call_prepare(task);
+                       if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
                                break;
-                       }
                }
+
                if (data->arg.new_lock_owner != 0) {
                        nfs_confirm_seqid(&lsp->ls_seqid, 0);
                        nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
                        set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
-               } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
-                       rpc_restart_call_prepare(task);
+                       goto out_done;
+               } else if (nfs4_update_lock_stateid(lsp, &data->res.stateid))
+                       goto out_done;
+
                break;
        case -NFS4ERR_BAD_STATEID:
        case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_EXPIRED:
                if (data->arg.new_lock_owner != 0) {
-                       if (!nfs4_stateid_match(&data->arg.open_stateid,
+                       if (nfs4_stateid_match(&data->arg.open_stateid,
                                                &lsp->ls_state->open_stateid))
-                               rpc_restart_call_prepare(task);
-               } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
+                               goto out_done;
+               } else if (nfs4_stateid_match(&data->arg.lock_stateid,
                                                &lsp->ls_stateid))
-                               rpc_restart_call_prepare(task);
+                               goto out_done;
        }
+       if (!data->cancelled)
+               rpc_restart_call_prepare(task);
+out_done:
        dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
 }
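
The fix hinges on the new data->cancelled checks above: once the waiter has
been interrupted, the completion path must neither create the local lock
record (which nobody would ever release) nor restart the RPC. As a rough
illustration of that control flow, here is a minimal userspace sketch; it is
not the kernel code, and lock_request/lock_done are hypothetical stand-ins
for struct nfs4_lockdata and nfs4_lock_done().

#include <stdbool.h>
#include <stdio.h>

struct lock_request {
	bool cancelled;	/* waiter was interrupted before the reply arrived */
	bool new_lock;	/* success would normally create a local lock record */
};

/* Stand-in for nfs4_lock_done(): runs when the LOCK reply comes back. */
static void lock_done(struct lock_request *req, int status)
{
	if (status == 0) {
		/* Mirrors the new_lock && !cancelled test in the patch. */
		if (req->new_lock && !req->cancelled)
			printf("install local lock\n");
		return;
	}
	/* Error cases: only re-send the operation if the waiter is
	 * still there, mirroring the final !data->cancelled check. */
	if (!req->cancelled)
		printf("restart RPC, status %d\n", status);
}

int main(void)
{
	struct lock_request ok   = { .cancelled = false, .new_lock = true };
	struct lock_request gone = { .cancelled = true,  .new_lock = true };

	lock_done(&ok, 0);	/* normal path: local lock is installed */
	lock_done(&gone, 0);	/* interrupted: late reply is dropped   */
	lock_done(&gone, -1);	/* interrupted: no retry either         */
	return 0;
}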