[linux-block.git] / fs / ceph / locks.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include <linux/filelock.h>
#include <linux/ceph/pagelist.h>

static u64 lock_secret;
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req);

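/*
 * Derive an opaque lock-owner token: the owner pointer is XORed with a
 * random per-boot secret, so a raw kernel address is never sent to the
 * MDS.
 */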
static inline u64 secure_addr(void *addr)
{
        u64 v = lock_secret ^ (u64)(unsigned long)addr;
        /*
         * Set the most significant bit, so that the MDS knows the 'owner'
         * field alone is sufficient to identify the owner of the lock.
         * (The old code used both 'owner' and 'pid'.)
         */
        v |= (1ULL << 63);
        return v;
}

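/* Seed the per-boot lock secret; called once at filesystem init. */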
void __init ceph_flock_init(void)
{
        get_random_bytes(&lock_secret, sizeof(lock_secret));
}

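/*
 * Pin the inode and bump i_filelock_ref for the lifetime of the lock,
 * so the auth caps backing it are not trimmed while it is held.
 */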
static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct inode *inode = file_inode(dst->fl_file);

        atomic_inc(&ceph_inode(inode)->i_filelock_ref);
        dst->fl_u.ceph.inode = igrab(inode);
}

/*
 * Do not use fl->fl_file in this release function; it may already have
 * been released by another thread.
 */
static void ceph_fl_release_lock(struct file_lock *fl)
{
        struct inode *inode = fl->fl_u.ceph.inode;
        struct ceph_inode_info *ci;

        /*
         * If inode is NULL it should be a request file_lock;
         * nothing we can do.
         */
        if (!inode)
                return;

        ci = ceph_inode(inode);
        if (atomic_dec_and_test(&ci->i_filelock_ref)) {
                /* clear error when all locks are released */
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
                spin_unlock(&ci->i_ceph_lock);
        }
        fl->fl_u.ceph.inode = NULL;
        iput(inode);
}

static const struct file_lock_operations ceph_fl_lock_ops = {
        .fl_copy_lock = ceph_fl_copy_lock,
        .fl_release_private = ceph_fl_release_lock,
};

/*
 * Implement fcntl and flock locking functions.
 */
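/*
 * Build a SETFILELOCK/GETFILELOCK request and send it to the auth MDS.
 * Note that the wire format takes (start, length) rather than
 * (start, end); a lock extending to the end of the file
 * (fl_end == LLONG_MAX) is encoded as length 0.
 */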
static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                             int cmd, u8 wait, struct file_lock *fl)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_mds_request *req;
        int err;
        u64 length = 0;
        u64 owner;

        if (operation == CEPH_MDS_OP_SETFILELOCK) {
                /*
                 * Increasing i_filelock_ref closes the race window between
                 * handling the request reply and adding the file_lock struct
                 * to the inode. Otherwise, the auth caps may get trimmed in
                 * the window. The caller will decrease the counter.
                 */
                fl->fl_ops = &ceph_fl_lock_ops;
                fl->fl_ops->fl_copy_lock(fl, NULL);
        }

        if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
                wait = 0;

        req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
                length = 0;
        else
                length = fl->fl_end - fl->fl_start + 1;

        owner = secure_addr(fl->fl_owner);

        dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
             "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
             (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
             wait, fl->fl_type);

        req->r_args.filelock_change.rule = lock_type;
        req->r_args.filelock_change.type = cmd;
        req->r_args.filelock_change.owner = cpu_to_le64(owner);
        req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
        req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
        req->r_args.filelock_change.length = cpu_to_le64(length);
        req->r_args.filelock_change.wait = wait;

        err = ceph_mdsc_submit_request(mdsc, inode, req);
        if (!err)
                err = ceph_mdsc_wait_request(mdsc, req, wait ?
                                ceph_lock_wait_for_completion : NULL);
        if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
                fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
                if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_RDLCK;
                else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_WRLCK;
                else
                        fl->fl_type = F_UNLCK;

                fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
                length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
                         le64_to_cpu(req->r_reply_info.filelock_reply->length);
                if (length >= 1)
                        fl->fl_end = length - 1;
                else
                        fl->fl_end = 0;
        }
        ceph_mdsc_put_request(req);
        dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
             "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
             (int)operation, (u64)fl->fl_pid, fl->fl_start,
             length, wait, fl->fl_type, err);
        return err;
}

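/*
 * Called while a SETFILELOCK request is blocked on the MDS. If the
 * waiting task is interrupted, issue a matching *_INTR unlock request
 * so the MDS drops the blocked lock attempt, then wait for the
 * original request to complete safely.
 */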
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req)
{
        struct ceph_mds_request *intr_req;
        struct inode *inode = req->r_inode;
        int err, lock_type;

        BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
        if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
                lock_type = CEPH_LOCK_FCNTL_INTR;
        else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
                lock_type = CEPH_LOCK_FLOCK_INTR;
        else
                BUG_ON(1);
        BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);

        err = wait_for_completion_interruptible(&req->r_completion);
        if (!err)
                return 0;

        dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
             req->r_tid);

        mutex_lock(&mdsc->mutex);
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                err = 0;
        } else {
                /*
                 * Ensure we aren't running concurrently with
                 * ceph_fill_trace or ceph_readdir_prepopulate, which
                 * rely on locks (dir mutex) held by our caller.
                 */
                mutex_lock(&req->r_fill_mutex);
                req->r_err = err;
                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
                mutex_unlock(&req->r_fill_mutex);

                if (!req->r_session) {
                        /* haven't sent the request */
                        err = 0;
                }
        }
        mutex_unlock(&mdsc->mutex);
        if (!err)
                return 0;

        intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
                                            USE_AUTH_MDS);
        if (IS_ERR(intr_req))
                return PTR_ERR(intr_req);

        intr_req->r_inode = inode;
        ihold(inode);
        intr_req->r_num_caps = 1;

        intr_req->r_args.filelock_change = req->r_args.filelock_change;
        intr_req->r_args.filelock_change.rule = lock_type;
        intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;

        err = ceph_mdsc_do_request(mdsc, inode, intr_req);
        ceph_mdsc_put_request(intr_req);

        if (err && err != -ERESTARTSYS)
                return err;

        wait_for_completion_killable(&req->r_safe_completion);
        return 0;
}

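/*
 * Try to drop the lock locally before telling the MDS. Returns 1 if a
 * matching local lock existed (so the server must be told as well),
 * <= 0 if there was nothing to unlock or the VFS call failed. Setting
 * FL_EXISTS makes locks_lock_file_wait() report -ENOENT when no lock
 * was held.
 */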
static int try_unlock_file(struct file *file, struct file_lock *fl)
{
        int err;
        unsigned int orig_flags = fl->fl_flags;

        fl->fl_flags |= FL_EXISTS;
        err = locks_lock_file_wait(file, fl);
        fl->fl_flags = orig_flags;
        if (err == -ENOENT) {
                if (!(orig_flags & FL_EXISTS))
                        err = 0;
                return err;
        }
        return 1;
}

/*
 * Attempt to set an fcntl lock.
 * For now, this just goes out to the server. Later it may be more awesome.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u16 op = CEPH_MDS_OP_SETFILELOCK;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;

        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;

        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);

        /* set the wait bit as appropriate, then map the command to what Ceph expects */
        if (IS_GETLK(cmd))
                op = CEPH_MDS_OP_GETFILELOCK;
        else if (IS_SETLKW(cmd))
                wait = 1;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)
                err = -EIO;
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
                        posix_lock_file(file, fl, NULL);
                return err;
        }

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
        if (!err) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
                        dout("mds locked, locking locally\n");
                        err = posix_lock_file(file, fl, NULL);
                        if (err) {
                                /*
                                 * Undo! This should only happen if
                                 * the kernel detects local deadlock.
                                 */
                                ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
                                dout("got %d on posix_lock_file, undid lock\n",
                                     err);
                        }
                }
        }
        return err;
}

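/*
 * Attempt to set an flock. As with fcntl locks above, the lock is taken
 * on the MDS first and then mirrored locally; on local failure the MDS
 * lock is rolled back.
 */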
int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;

        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;

        dout("ceph_flock, fl_file: %p\n", fl->fl_file);

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)
                err = -EIO;
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (F_UNLCK == fl->fl_type)
                        locks_lock_file_wait(file, fl);
                return err;
        }

        if (IS_SETLKW(cmd))
                wait = 1;

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        if (F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
                                inode, lock_cmd, wait, fl);
        if (!err && F_UNLCK != fl->fl_type) {
                err = locks_lock_file_wait(file, fl);
                if (err) {
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          inode, CEPH_LOCK_UNLOCK, 0, fl);
                        dout("got %d on locks_lock_file_wait, undid lock\n",
                             err);
                }
        }
        return err;
}

/*
 * Fill in the passed counter variables, so the caller can prepare
 * pagelist metadata before calling ceph_encode_locks_to_buffer().
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
        struct file_lock *lock;
        struct file_lock_context *ctx;

        *fcntl_count = 0;
        *flock_count = 0;

        ctx = locks_inode_context(inode);
        if (ctx) {
                spin_lock(&ctx->flc_lock);
                list_for_each_entry(lock, &ctx->flc_posix, fl_list)
                        ++(*fcntl_count);
                list_for_each_entry(lock, &ctx->flc_flock, fl_list)
                        ++(*flock_count);
                spin_unlock(&ctx->flc_lock);
        }
        dout("counted %d flock locks and %d fcntl locks\n",
             *flock_count, *fcntl_count);
}

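/*
 * Typical flow, sketched: ceph_count_locks() sizes the arrays,
 * ceph_encode_locks_to_buffer() fills a ceph_filelock array, and
 * ceph_locks_to_pagelist() copies the result into the pagelist that is
 * sent to the MDS.
 */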
/*
 * Given a pointer to a lock, convert it to a ceph filelock.
 */
static int lock_to_ceph_filelock(struct file_lock *lock,
                                 struct ceph_filelock *cephlock)
{
        int err = 0;

        cephlock->start = cpu_to_le64(lock->fl_start);
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
        cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
        cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));

        switch (lock->fl_type) {
        case F_RDLCK:
                cephlock->type = CEPH_LOCK_SHARED;
                break;
        case F_WRLCK:
                cephlock->type = CEPH_LOCK_EXCL;
                break;
        case F_UNLCK:
                cephlock->type = CEPH_LOCK_UNLOCK;
                break;
        default:
                dout("Have unknown lock type %d\n", lock->fl_type);
                err = -EINVAL;
        }

        return err;
}

/*
 * Encode the flock and fcntl locks for the given inode into the
 * ceph_filelock array. The lock lists are walked under the inode's
 * flc_lock.
 * If we encounter more of a specific lock type than expected, return
 * -ENOSPC.
 */
int ceph_encode_locks_to_buffer(struct inode *inode,
                                struct ceph_filelock *flocks,
                                int num_fcntl_locks, int num_flock_locks)
{
        struct file_lock *lock;
        struct file_lock_context *ctx = locks_inode_context(inode);
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
        int l = 0;

        dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
             num_fcntl_locks);

        if (!ctx)
                return 0;

        spin_lock(&ctx->flc_lock);
        list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
                ++seen_fcntl;
                if (seen_fcntl > num_fcntl_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
                ++seen_flock;
                if (seen_flock > num_flock_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
fail:
        spin_unlock(&ctx->flc_lock);
        return err;
}

/*
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Returns zero on success.
 */
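/*
 * Wire layout sketch (derived from the code below; counts are
 * little-endian):
 *
 *   __le32 num_fcntl_locks
 *   struct ceph_filelock[num_fcntl_locks]
 *   __le32 num_flock_locks
 *   struct ceph_filelock[num_flock_locks]
 */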
int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
                           struct ceph_pagelist *pagelist,
                           int num_fcntl_locks, int num_flock_locks)
{
        int err = 0;
        __le32 nlocks;

        nlocks = cpu_to_le32(num_fcntl_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_fcntl_locks > 0) {
                err = ceph_pagelist_append(pagelist, flocks,
                                           num_fcntl_locks * sizeof(*flocks));
                if (err)
                        goto out_fail;
        }

        nlocks = cpu_to_le32(num_flock_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_flock_locks > 0) {
                err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks],
                                           num_flock_locks * sizeof(*flocks));
        }
out_fail:
        return err;
}