4 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5 * Doug Evans (dje@spiff.uucp), August 07, 1992
7 * Deadlock detection added.
8 * FIXME: one thing isn't handled yet:
9 * - mandatory locks (requires lots of changes elsewhere)
10 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
12 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
15 * Converted file_lock_table to a linked list from an array, which eliminates
16 * the limits on how many active file locks are open.
17 * Chad Page (pageone@netcom.com), November 27, 1994
19 * Removed dependency on file descriptors. dup()'ed file descriptors now
20 * get the same locks as the original file descriptors, and a close() on
21 * any file descriptor removes ALL the locks on the file for the current
22 * process. Since locks still depend on the process id, locks are inherited
23 * after an exec() but not after a fork(). This agrees with POSIX, and both
24 * BSD and SVR4 practice.
25 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
27 * Scrapped free list which is redundant now that we allocate locks
28 * dynamically with kmalloc()/kfree().
29 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
31 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
33 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
34 * fcntl() system call. They have the semantics described above.
36 * FL_FLOCK locks are created with calls to flock(), through the flock()
37 * system call, which is new. Old C libraries implement flock() via fcntl()
38 * and will continue to use the old, broken implementation.
40 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41 * with a file pointer (filp). As a result they can be shared by a parent
42 * process and its children after a fork(). They are removed when the last
43 * file descriptor referring to the file pointer is closed (unless explicitly
46 * FL_FLOCK locks never deadlock, an existing lock is always removed before
47 * upgrading from shared to exclusive (or vice versa). When this happens
48 * any processes blocked by the current lock are woken up and allowed to
49 * run before the new lock is applied.
50 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
52 * Removed some race conditions in flock_lock_file(), marked other possible
53 * races. Just grep for FIXME to see them.
54 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
56 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58 * once we've checked for blocking and deadlocking.
59 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
61 * Initial implementation of mandatory locks. SunOS turned out to be
62 * a rotten model, so I implemented the "obvious" semantics.
63 * See 'Documentation/filesystems/mandatory-locking.txt' for details.
64 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
66 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67 * check if a file has mandatory locks, used by mmap(), open() and creat() to
68 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
70 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
72 * Tidied up block list handling. Added '/proc/locks' interface.
73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
75 * Fixed deadlock condition for pathological code that mixes calls to
76 * flock() and fcntl().
77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81 * guarantee sensible behaviour in the case where file system modules might
82 * be compiled with different options than the kernel itself.
83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90 * locks. Changed process synchronisation to avoid dereferencing locks that
91 * have already been freed.
92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
94 * Made the block list a circular list to minimise searching in the list.
95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
97 * Made mandatory locking a mount option. Default is not to allow mandatory
99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
101 * Some adaptations for NFS support.
102 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
107 * Use slab allocator instead of kmalloc/kfree.
108 * Use generic list implementation from <linux/list.h>.
109 * Sped up posix_locks_deadlock by only considering blocked locks.
110 * Matthew Wilcox <willy@debian.org>, March, 2000.
112 * Leases and LOCK_MAND
113 * Matthew Wilcox <willy@debian.org>, June, 2000.
114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/security.h>
123 #include <linux/slab.h>
124 #include <linux/syscalls.h>
125 #include <linux/time.h>
126 #include <linux/rcupdate.h>
127 #include <linux/pid_namespace.h>
128 #include <linux/hashtable.h>
129 #include <linux/percpu.h>
131 #define CREATE_TRACE_POINTS
132 #include <trace/events/filelock.h>
134 #include <linux/uaccess.h>
136 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
137 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
138 #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
139 #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
140 #define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
142 static bool lease_breaking(struct file_lock *fl)
144 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
147 static int target_leasetype(struct file_lock *fl)
149 if (fl->fl_flags & FL_UNLOCK_PENDING)
151 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
156 int leases_enable = 1;
157 int lease_break_time = 45;
160 * The global file_lock_list is only used for displaying /proc/locks, so we
161 * keep a list on each CPU, with each list protected by its own spinlock.
162 * Global serialization is done using file_rwsem.
164 * Note that alterations to the list also require that the relevant flc_lock is
167 struct file_lock_list_struct {
169 struct hlist_head hlist;
171 static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
172 DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
175 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
176 * It is protected by blocked_lock_lock.
178 * We hash locks by lockowner in order to optimize searching for the lock a
179 * particular lockowner is waiting on.
181 * FIXME: make this value scale via some heuristic? We generally will want more
182 * buckets when we have more lockowners holding locks, but that's a little
183 * difficult to determine without knowing what the workload will look like.
185 #define BLOCKED_HASH_BITS 7
186 static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
189 * This lock protects the blocked_hash. Generally, if you're accessing it, you
190 * want to be holding this lock.
192 * In addition, it also protects the fl->fl_blocked_requests list, and the
193 * fl->fl_blocker pointer for file_lock structures that are acting as lock
194 * requests (in contrast to those that are acting as records of acquired locks).
196 * Note that when we acquire this lock in order to change the above fields,
197 * we often hold the flc_lock as well. In certain cases, when reading the fields
198 * protected by this lock, we can skip acquiring it iff we already hold the
201 static DEFINE_SPINLOCK(blocked_lock_lock);
203 static struct kmem_cache *flctx_cache __read_mostly;
204 static struct kmem_cache *filelock_cache __read_mostly;
206 static struct file_lock_context *
207 locks_get_lock_context(struct inode *inode, int type)
209 struct file_lock_context *ctx;
211 /* paired with cmpxchg() below */
212 ctx = smp_load_acquire(&inode->i_flctx);
213 if (likely(ctx) || type == F_UNLCK)
216 ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
220 spin_lock_init(&ctx->flc_lock);
221 INIT_LIST_HEAD(&ctx->flc_flock);
222 INIT_LIST_HEAD(&ctx->flc_posix);
223 INIT_LIST_HEAD(&ctx->flc_lease);
226 * Assign the pointer if it's not already assigned. If it is, then
227 * free the context we just allocated.
229 if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
230 kmem_cache_free(flctx_cache, ctx);
231 ctx = smp_load_acquire(&inode->i_flctx);
234 trace_locks_get_lock_context(inode, type, ctx);
239 locks_dump_ctx_list(struct list_head *list, char *list_type)
241 struct file_lock *fl;
243 list_for_each_entry(fl, list, fl_list) {
244 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
249 locks_check_ctx_lists(struct inode *inode)
251 struct file_lock_context *ctx = inode->i_flctx;
253 if (unlikely(!list_empty(&ctx->flc_flock) ||
254 !list_empty(&ctx->flc_posix) ||
255 !list_empty(&ctx->flc_lease))) {
256 pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
257 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
259 locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
260 locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
261 locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
266 locks_check_ctx_file_list(struct file *filp, struct list_head *list,
269 struct file_lock *fl;
270 struct inode *inode = locks_inode(filp);
272 list_for_each_entry(fl, list, fl_list)
273 if (fl->fl_file == filp)
274 pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
275 " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
276 list_type, MAJOR(inode->i_sb->s_dev),
277 MINOR(inode->i_sb->s_dev), inode->i_ino,
278 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
282 locks_free_lock_context(struct inode *inode)
284 struct file_lock_context *ctx = inode->i_flctx;
287 locks_check_ctx_lists(inode);
288 kmem_cache_free(flctx_cache, ctx);
292 static void locks_init_lock_heads(struct file_lock *fl)
294 INIT_HLIST_NODE(&fl->fl_link);
295 INIT_LIST_HEAD(&fl->fl_list);
296 INIT_LIST_HEAD(&fl->fl_blocked_requests);
297 INIT_LIST_HEAD(&fl->fl_blocked_member);
298 init_waitqueue_head(&fl->fl_wait);
301 /* Allocate an empty lock structure. */
302 struct file_lock *locks_alloc_lock(void)
304 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
307 locks_init_lock_heads(fl);
311 EXPORT_SYMBOL_GPL(locks_alloc_lock);
313 void locks_release_private(struct file_lock *fl)
316 if (fl->fl_ops->fl_release_private)
317 fl->fl_ops->fl_release_private(fl);
322 if (fl->fl_lmops->lm_put_owner) {
323 fl->fl_lmops->lm_put_owner(fl->fl_owner);
329 EXPORT_SYMBOL_GPL(locks_release_private);
331 /* Free a lock which is not in use. */
332 void locks_free_lock(struct file_lock *fl)
334 BUG_ON(waitqueue_active(&fl->fl_wait));
335 BUG_ON(!list_empty(&fl->fl_list));
336 BUG_ON(!list_empty(&fl->fl_blocked_requests));
337 BUG_ON(!list_empty(&fl->fl_blocked_member));
338 BUG_ON(!hlist_unhashed(&fl->fl_link));
340 locks_release_private(fl);
341 kmem_cache_free(filelock_cache, fl);
343 EXPORT_SYMBOL(locks_free_lock);
346 locks_dispose_list(struct list_head *dispose)
348 struct file_lock *fl;
350 while (!list_empty(dispose)) {
351 fl = list_first_entry(dispose, struct file_lock, fl_list);
352 list_del_init(&fl->fl_list);
357 void locks_init_lock(struct file_lock *fl)
359 memset(fl, 0, sizeof(struct file_lock));
360 locks_init_lock_heads(fl);
363 EXPORT_SYMBOL(locks_init_lock);
366 * Initialize a new lock from an existing file_lock structure.
368 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
370 new->fl_owner = fl->fl_owner;
371 new->fl_pid = fl->fl_pid;
373 new->fl_flags = fl->fl_flags;
374 new->fl_type = fl->fl_type;
375 new->fl_start = fl->fl_start;
376 new->fl_end = fl->fl_end;
377 new->fl_lmops = fl->fl_lmops;
381 if (fl->fl_lmops->lm_get_owner)
382 fl->fl_lmops->lm_get_owner(fl->fl_owner);
385 EXPORT_SYMBOL(locks_copy_conflock);
387 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
389 /* "new" must be a freshly-initialized lock */
390 WARN_ON_ONCE(new->fl_ops);
392 locks_copy_conflock(new, fl);
394 new->fl_file = fl->fl_file;
395 new->fl_ops = fl->fl_ops;
398 if (fl->fl_ops->fl_copy_lock)
399 fl->fl_ops->fl_copy_lock(new, fl);
403 EXPORT_SYMBOL(locks_copy_lock);
405 static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
410 * As ctx->flc_lock is held, new requests cannot be added to
411 * ->fl_blocked_requests, so we don't need a lock to check if it
414 if (list_empty(&fl->fl_blocked_requests))
416 spin_lock(&blocked_lock_lock);
417 list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
418 list_for_each_entry(f, &fl->fl_blocked_requests, fl_blocked_member)
420 spin_unlock(&blocked_lock_lock);
423 static inline int flock_translate_cmd(int cmd) {
425 return cmd & (LOCK_MAND | LOCK_RW);
437 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
438 static struct file_lock *
439 flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
441 int type = flock_translate_cmd(cmd);
444 return ERR_PTR(type);
447 fl = locks_alloc_lock();
449 return ERR_PTR(-ENOMEM);
456 fl->fl_pid = current->tgid;
457 fl->fl_flags = FL_FLOCK;
459 fl->fl_end = OFFSET_MAX;
464 static int assign_type(struct file_lock *fl, long type)
478 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
481 switch (l->l_whence) {
486 fl->fl_start = filp->f_pos;
489 fl->fl_start = i_size_read(file_inode(filp));
494 if (l->l_start > OFFSET_MAX - fl->fl_start)
496 fl->fl_start += l->l_start;
497 if (fl->fl_start < 0)
500 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
501 POSIX-2001 defines it. */
503 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
505 fl->fl_end = fl->fl_start + l->l_len - 1;
507 } else if (l->l_len < 0) {
508 if (fl->fl_start + l->l_len < 0)
510 fl->fl_end = fl->fl_start - 1;
511 fl->fl_start += l->l_len;
513 fl->fl_end = OFFSET_MAX;
515 fl->fl_owner = current->files;
516 fl->fl_pid = current->tgid;
518 fl->fl_flags = FL_POSIX;
522 return assign_type(fl, l->l_type);
525 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
528 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
531 struct flock64 ll = {
533 .l_whence = l->l_whence,
534 .l_start = l->l_start,
538 return flock64_to_posix_lock(filp, fl, &ll);
541 /* default lease lock manager operations */
543 lease_break_callback(struct file_lock *fl)
545 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
550 lease_setup(struct file_lock *fl, void **priv)
552 struct file *filp = fl->fl_file;
553 struct fasync_struct *fa = *priv;
556 * fasync_insert_entry() returns the old entry if any. If there was no
557 * old entry, then it used "priv" and inserted it into the fasync list.
558 * Clear the pointer to indicate that it shouldn't be freed.
560 if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
563 __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
566 static const struct lock_manager_operations lease_manager_ops = {
567 .lm_break = lease_break_callback,
568 .lm_change = lease_modify,
569 .lm_setup = lease_setup,
573 * Initialize a lease, use the default lock manager operations
575 static int lease_init(struct file *filp, long type, struct file_lock *fl)
577 if (assign_type(fl, type) != 0)
581 fl->fl_pid = current->tgid;
584 fl->fl_flags = FL_LEASE;
586 fl->fl_end = OFFSET_MAX;
588 fl->fl_lmops = &lease_manager_ops;
592 /* Allocate a file_lock initialised to this type of lease */
593 static struct file_lock *lease_alloc(struct file *filp, long type)
595 struct file_lock *fl = locks_alloc_lock();
599 return ERR_PTR(error);
601 error = lease_init(filp, type, fl);
604 return ERR_PTR(error);
609 /* Check if two locks overlap each other.
611 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
613 return ((fl1->fl_end >= fl2->fl_start) &&
614 (fl2->fl_end >= fl1->fl_start));
618 * Check whether two locks have the same owner.
620 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
622 if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
623 return fl2->fl_lmops == fl1->fl_lmops &&
624 fl1->fl_lmops->lm_compare_owner(fl1, fl2);
625 return fl1->fl_owner == fl2->fl_owner;
628 /* Must be called with the flc_lock held! */
629 static void locks_insert_global_locks(struct file_lock *fl)
631 struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
633 percpu_rwsem_assert_held(&file_rwsem);
635 spin_lock(&fll->lock);
636 fl->fl_link_cpu = smp_processor_id();
637 hlist_add_head(&fl->fl_link, &fll->hlist);
638 spin_unlock(&fll->lock);
641 /* Must be called with the flc_lock held! */
642 static void locks_delete_global_locks(struct file_lock *fl)
644 struct file_lock_list_struct *fll;
646 percpu_rwsem_assert_held(&file_rwsem);
649 * Avoid taking lock if already unhashed. This is safe since this check
650 * is done while holding the flc_lock, and new insertions into the list
651 * also require that it be held.
653 if (hlist_unhashed(&fl->fl_link))
656 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
657 spin_lock(&fll->lock);
658 hlist_del_init(&fl->fl_link);
659 spin_unlock(&fll->lock);
663 posix_owner_key(struct file_lock *fl)
665 if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
666 return fl->fl_lmops->lm_owner_key(fl);
667 return (unsigned long)fl->fl_owner;
670 static void locks_insert_global_blocked(struct file_lock *waiter)
672 lockdep_assert_held(&blocked_lock_lock);
674 hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
677 static void locks_delete_global_blocked(struct file_lock *waiter)
679 lockdep_assert_held(&blocked_lock_lock);
681 hash_del(&waiter->fl_link);
684 /* Remove waiter from blocker's block list.
685 * When blocker ends up pointing to itself then the list is empty.
687 * Must be called with blocked_lock_lock held.
689 static void __locks_delete_block(struct file_lock *waiter)
691 locks_delete_global_blocked(waiter);
692 list_del_init(&waiter->fl_blocked_member);
693 waiter->fl_blocker = NULL;
696 static void __locks_wake_up_blocks(struct file_lock *blocker)
698 while (!list_empty(&blocker->fl_blocked_requests)) {
699 struct file_lock *waiter;
701 waiter = list_first_entry(&blocker->fl_blocked_requests,
702 struct file_lock, fl_blocked_member);
703 __locks_delete_block(waiter);
704 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
705 waiter->fl_lmops->lm_notify(waiter);
707 wake_up(&waiter->fl_wait);
711 static void locks_delete_block(struct file_lock *waiter)
714 * If fl_blocker is NULL, it won't be set again as this thread
715 * "owns" the lock and is the only one that might try to claim
716 * the lock. So it is safe to test fl_blocker locklessly.
717 * Also if fl_blocker is NULL, this waiter is not listed on
718 * fl_blocked_requests for some lock, so no other request can
719 * be added to the list of fl_blocked_requests for this
720 * request. So if fl_blocker is NULL, it is safe to
721 * locklessly check if fl_blocked_requests is empty. If both
722 * of these checks succeed, there is no need to take the lock.
724 if (waiter->fl_blocker == NULL &&
725 list_empty(&waiter->fl_blocked_requests))
727 spin_lock(&blocked_lock_lock);
728 __locks_wake_up_blocks(waiter);
729 __locks_delete_block(waiter);
730 spin_unlock(&blocked_lock_lock);
733 /* Insert waiter into blocker's block list.
734 * We use a circular list so that processes can be easily woken up in
735 * the order they blocked. The documentation doesn't require this but
736 * it seems like the reasonable thing to do.
738 * Must be called with both the flc_lock and blocked_lock_lock held. The
739 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
740 * but by ensuring that the flc_lock is also held on insertions we can avoid
741 * taking the blocked_lock_lock in some cases when we see that the
742 * fl_blocked_requests list is empty.
744 static void __locks_insert_block(struct file_lock *blocker,
745 struct file_lock *waiter)
747 BUG_ON(!list_empty(&waiter->fl_blocked_member));
748 waiter->fl_blocker = blocker;
749 list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
750 if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
751 locks_insert_global_blocked(waiter);
753 /* The requests in waiter->fl_blocked are known to conflict with
754 * waiter, but might not conflict with blocker, or the requests
755 * and lock which block it. So they all need to be woken.
757 __locks_wake_up_blocks(waiter);
760 /* Must be called with flc_lock held. */
761 static void locks_insert_block(struct file_lock *blocker,
762 struct file_lock *waiter)
764 spin_lock(&blocked_lock_lock);
765 __locks_insert_block(blocker, waiter);
766 spin_unlock(&blocked_lock_lock);
770 * Wake up processes blocked waiting for blocker.
772 * Must be called with the inode->flc_lock held!
774 static void locks_wake_up_blocks(struct file_lock *blocker)
777 * Avoid taking global lock if list is empty. This is safe since new
778 * blocked requests are only added to the list under the flc_lock, and
779 * the flc_lock is always held here. Note that removal from the
780 * fl_blocked_requests list does not require the flc_lock, so we must
781 * recheck list_empty() after acquiring the blocked_lock_lock.
783 if (list_empty(&blocker->fl_blocked_requests))
786 spin_lock(&blocked_lock_lock);
787 __locks_wake_up_blocks(blocker);
788 spin_unlock(&blocked_lock_lock);
792 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
794 list_add_tail(&fl->fl_list, before);
795 locks_insert_global_locks(fl);
799 locks_unlink_lock_ctx(struct file_lock *fl)
801 locks_delete_global_locks(fl);
802 list_del_init(&fl->fl_list);
803 locks_wake_up_blocks(fl);
807 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
809 locks_unlink_lock_ctx(fl);
811 list_add(&fl->fl_list, dispose);
816 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
817 * checks for shared/exclusive status of overlapping locks.
819 static bool locks_conflict(struct file_lock *caller_fl,
820 struct file_lock *sys_fl)
822 if (sys_fl->fl_type == F_WRLCK)
824 if (caller_fl->fl_type == F_WRLCK)
829 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
830 * checking before calling the locks_conflict().
832 static bool posix_locks_conflict(struct file_lock *caller_fl,
833 struct file_lock *sys_fl)
835 /* POSIX locks owned by the same process do not conflict with
838 if (posix_same_owner(caller_fl, sys_fl))
841 /* Check whether they overlap */
842 if (!locks_overlap(caller_fl, sys_fl))
845 return locks_conflict(caller_fl, sys_fl);
848 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
849 * checking before calling the locks_conflict().
851 static bool flock_locks_conflict(struct file_lock *caller_fl,
852 struct file_lock *sys_fl)
854 /* FLOCK locks referring to the same filp do not conflict with
857 if (caller_fl->fl_file == sys_fl->fl_file)
859 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
862 return locks_conflict(caller_fl, sys_fl);
866 posix_test_lock(struct file *filp, struct file_lock *fl)
868 struct file_lock *cfl;
869 struct file_lock_context *ctx;
870 struct inode *inode = locks_inode(filp);
872 ctx = smp_load_acquire(&inode->i_flctx);
873 if (!ctx || list_empty_careful(&ctx->flc_posix)) {
874 fl->fl_type = F_UNLCK;
878 spin_lock(&ctx->flc_lock);
879 list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
880 if (posix_locks_conflict(fl, cfl)) {
881 locks_copy_conflock(fl, cfl);
885 fl->fl_type = F_UNLCK;
887 spin_unlock(&ctx->flc_lock);
890 EXPORT_SYMBOL(posix_test_lock);
893 * Deadlock detection:
895 * We attempt to detect deadlocks that are due purely to posix file
898 * We assume that a task can be waiting for at most one lock at a time.
899 * So for any acquired lock, the process holding that lock may be
900 * waiting on at most one other lock. That lock in turns may be held by
901 * someone waiting for at most one other lock. Given a requested lock
902 * caller_fl which is about to wait for a conflicting lock block_fl, we
903 * follow this chain of waiters to ensure we are not about to create a
906 * Since we do this before we ever put a process to sleep on a lock, we
907 * are ensured that there is never a cycle; that is what guarantees that
908 * the while() loop in posix_locks_deadlock() eventually completes.
910 * Note: the above assumption may not be true when handling lock
911 * requests from a broken NFS client. It may also fail in the presence
912 * of tasks (such as posix threads) sharing the same open file table.
913 * To handle those cases, we just bail out after a few iterations.
915 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
916 * Because the owner is not even nominally tied to a thread of
917 * execution, the deadlock detection below can't reasonably work well. Just
920 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
921 * locks that just checks for the case where two tasks are attempting to
922 * upgrade from read to write locks on the same inode.
925 #define MAX_DEADLK_ITERATIONS 10
927 /* Find a lock that the owner of the given block_fl is blocking on. */
928 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
930 struct file_lock *fl;
932 hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
933 if (posix_same_owner(fl, block_fl)) {
934 while (fl->fl_blocker)
942 /* Must be called with the blocked_lock_lock held! */
943 static int posix_locks_deadlock(struct file_lock *caller_fl,
944 struct file_lock *block_fl)
948 lockdep_assert_held(&blocked_lock_lock);
951 * This deadlock detector can't reasonably detect deadlocks with
952 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
954 if (IS_OFDLCK(caller_fl))
957 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
958 if (i++ > MAX_DEADLK_ITERATIONS)
960 if (posix_same_owner(caller_fl, block_fl))
966 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
967 * after any leases, but before any posix locks.
969 * Note that if called with an FL_EXISTS argument, the caller may determine
970 * whether or not a lock was successfully freed by testing the return
973 static int flock_lock_inode(struct inode *inode, struct file_lock *request)
975 struct file_lock *new_fl = NULL;
976 struct file_lock *fl;
977 struct file_lock_context *ctx;
982 ctx = locks_get_lock_context(inode, request->fl_type);
984 if (request->fl_type != F_UNLCK)
986 return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
989 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
990 new_fl = locks_alloc_lock();
995 percpu_down_read_preempt_disable(&file_rwsem);
996 spin_lock(&ctx->flc_lock);
997 if (request->fl_flags & FL_ACCESS)
1000 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1001 if (request->fl_file != fl->fl_file)
1003 if (request->fl_type == fl->fl_type)
1006 locks_delete_lock_ctx(fl, &dispose);
1010 if (request->fl_type == F_UNLCK) {
1011 if ((request->fl_flags & FL_EXISTS) && !found)
1017 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1018 if (!flock_locks_conflict(request, fl))
1021 if (!(request->fl_flags & FL_SLEEP))
1023 error = FILE_LOCK_DEFERRED;
1024 locks_insert_block(fl, request);
1027 if (request->fl_flags & FL_ACCESS)
1029 locks_copy_lock(new_fl, request);
1030 locks_move_blocks(new_fl, request);
1031 locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1036 spin_unlock(&ctx->flc_lock);
1037 percpu_up_read_preempt_enable(&file_rwsem);
1039 locks_free_lock(new_fl);
1040 locks_dispose_list(&dispose);
1041 trace_flock_lock_inode(inode, request, error);
1045 static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1046 struct file_lock *conflock)
1048 struct file_lock *fl, *tmp;
1049 struct file_lock *new_fl = NULL;
1050 struct file_lock *new_fl2 = NULL;
1051 struct file_lock *left = NULL;
1052 struct file_lock *right = NULL;
1053 struct file_lock_context *ctx;
1058 ctx = locks_get_lock_context(inode, request->fl_type);
1060 return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1063 * We may need two file_lock structures for this operation,
1064 * so we get them in advance to avoid races.
1066 * In some cases we can be sure, that no new locks will be needed
1068 if (!(request->fl_flags & FL_ACCESS) &&
1069 (request->fl_type != F_UNLCK ||
1070 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1071 new_fl = locks_alloc_lock();
1072 new_fl2 = locks_alloc_lock();
1075 percpu_down_read_preempt_disable(&file_rwsem);
1076 spin_lock(&ctx->flc_lock);
1078 * New lock request. Walk all POSIX locks and look for conflicts. If
1079 * there are any, either return error or put the request on the
1080 * blocker's list of waiters and the global blocked_hash.
1082 if (request->fl_type != F_UNLCK) {
1083 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1084 if (!posix_locks_conflict(request, fl))
1087 locks_copy_conflock(conflock, fl);
1089 if (!(request->fl_flags & FL_SLEEP))
1092 * Deadlock detection and insertion into the blocked
1093 * locks list must be done while holding the same lock!
1096 spin_lock(&blocked_lock_lock);
1097 if (likely(!posix_locks_deadlock(request, fl))) {
1098 error = FILE_LOCK_DEFERRED;
1099 __locks_insert_block(fl, request);
1101 spin_unlock(&blocked_lock_lock);
1106 /* If we're just looking for a conflict, we're done. */
1108 if (request->fl_flags & FL_ACCESS)
1111 /* Find the first old lock with the same owner as the new lock */
1112 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1113 if (posix_same_owner(request, fl))
1117 /* Process locks with this owner. */
1118 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1119 if (!posix_same_owner(request, fl))
1122 /* Detect adjacent or overlapping regions (if same lock type) */
1123 if (request->fl_type == fl->fl_type) {
1124 /* In all comparisons of start vs end, use
1125 * "start - 1" rather than "end + 1". If end
1126 * is OFFSET_MAX, end + 1 will become negative.
1128 if (fl->fl_end < request->fl_start - 1)
1130 /* If the next lock in the list has entirely bigger
1131 * addresses than the new one, insert the lock here.
1133 if (fl->fl_start - 1 > request->fl_end)
1136 /* If we come here, the new and old lock are of the
1137 * same type and adjacent or overlapping. Make one
1138 * lock yielding from the lower start address of both
1139 * locks to the higher end address.
1141 if (fl->fl_start > request->fl_start)
1142 fl->fl_start = request->fl_start;
1144 request->fl_start = fl->fl_start;
1145 if (fl->fl_end < request->fl_end)
1146 fl->fl_end = request->fl_end;
1148 request->fl_end = fl->fl_end;
1150 locks_delete_lock_ctx(fl, &dispose);
1156 /* Processing for different lock types is a bit
1159 if (fl->fl_end < request->fl_start)
1161 if (fl->fl_start > request->fl_end)
1163 if (request->fl_type == F_UNLCK)
1165 if (fl->fl_start < request->fl_start)
1167 /* If the next lock in the list has a higher end
1168 * address than the new one, insert the new one here.
1170 if (fl->fl_end > request->fl_end) {
1174 if (fl->fl_start >= request->fl_start) {
1175 /* The new lock completely replaces an old
1176 * one (This may happen several times).
1179 locks_delete_lock_ctx(fl, &dispose);
1183 * Replace the old lock with new_fl, and
1184 * remove the old one. It's safe to do the
1185 * insert here since we know that we won't be
1186 * using new_fl later, and that the lock is
1187 * just replacing an existing lock.
1192 locks_copy_lock(new_fl, request);
1195 locks_insert_lock_ctx(request, &fl->fl_list);
1196 locks_delete_lock_ctx(fl, &dispose);
1203 * The above code only modifies existing locks in case of merging or
1204 * replacing. If new lock(s) need to be inserted all modifications are
1205 * done below this, so it's safe yet to bail out.
1207 error = -ENOLCK; /* "no luck" */
1208 if (right && left == right && !new_fl2)
1213 if (request->fl_type == F_UNLCK) {
1214 if (request->fl_flags & FL_EXISTS)
1223 locks_copy_lock(new_fl, request);
1224 locks_move_blocks(new_fl, request);
1225 locks_insert_lock_ctx(new_fl, &fl->fl_list);
1230 if (left == right) {
1231 /* The new lock breaks the old one in two pieces,
1232 * so we have to use the second new lock.
1236 locks_copy_lock(left, right);
1237 locks_insert_lock_ctx(left, &fl->fl_list);
1239 right->fl_start = request->fl_end + 1;
1240 locks_wake_up_blocks(right);
1243 left->fl_end = request->fl_start - 1;
1244 locks_wake_up_blocks(left);
1247 spin_unlock(&ctx->flc_lock);
1248 percpu_up_read_preempt_enable(&file_rwsem);
1250 * Free any unused locks.
1253 locks_free_lock(new_fl);
1255 locks_free_lock(new_fl2);
1256 locks_dispose_list(&dispose);
1257 trace_posix_lock_inode(inode, request, error);
1263 * posix_lock_file - Apply a POSIX-style lock to a file
1264 * @filp: The file to apply the lock to
1265 * @fl: The lock to be applied
1266 * @conflock: Place to return a copy of the conflicting lock, if found.
1268 * Add a POSIX style lock to a file.
1269 * We merge adjacent & overlapping locks whenever possible.
1270 * POSIX locks are sorted by owner task, then by starting address
1272 * Note that if called with an FL_EXISTS argument, the caller may determine
1273 * whether or not a lock was successfully freed by testing the return
1274 * value for -ENOENT.
1276 int posix_lock_file(struct file *filp, struct file_lock *fl,
1277 struct file_lock *conflock)
1279 return posix_lock_inode(locks_inode(filp), fl, conflock);
1281 EXPORT_SYMBOL(posix_lock_file);
1284 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1285 * @inode: inode of file to which lock request should be applied
1286 * @fl: The lock to be applied
1288 * Apply a POSIX style lock request to an inode.
1290 static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1295 error = posix_lock_inode(inode, fl, NULL);
1296 if (error != FILE_LOCK_DEFERRED)
1298 error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
1302 locks_delete_block(fl);
1306 #ifdef CONFIG_MANDATORY_FILE_LOCKING
1308 * locks_mandatory_locked - Check for an active lock
1309 * @file: the file to check
1311 * Searches the inode's list of locks to find any POSIX locks which conflict.
1312 * This function is called from locks_verify_locked() only.
1314 int locks_mandatory_locked(struct file *file)
1317 struct inode *inode = locks_inode(file);
1318 struct file_lock_context *ctx;
1319 struct file_lock *fl;
1321 ctx = smp_load_acquire(&inode->i_flctx);
1322 if (!ctx || list_empty_careful(&ctx->flc_posix))
1326 * Search the lock list for this inode for any POSIX locks.
1328 spin_lock(&ctx->flc_lock);
1330 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1331 if (fl->fl_owner != current->files &&
1332 fl->fl_owner != file) {
1337 spin_unlock(&ctx->flc_lock);
1342 * locks_mandatory_area - Check for a conflicting lock
1343 * @inode: the file to check
1344 * @filp: how the file was opened (if it was)
1345 * @start: first byte in the file to check
1346 * @end: lastbyte in the file to check
1347 * @type: %F_WRLCK for a write lock, else %F_RDLCK
1349 * Searches the inode's list of locks to find any POSIX locks which conflict.
1351 int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
1352 loff_t end, unsigned char type)
1354 struct file_lock fl;
1358 locks_init_lock(&fl);
1359 fl.fl_pid = current->tgid;
1361 fl.fl_flags = FL_POSIX | FL_ACCESS;
1362 if (filp && !(filp->f_flags & O_NONBLOCK))
1365 fl.fl_start = start;
1371 fl.fl_flags &= ~FL_SLEEP;
1372 error = posix_lock_inode(inode, &fl, NULL);
1378 fl.fl_flags |= FL_SLEEP;
1379 fl.fl_owner = current->files;
1380 error = posix_lock_inode(inode, &fl, NULL);
1381 if (error != FILE_LOCK_DEFERRED)
1383 error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
1386 * If we've been sleeping someone might have
1387 * changed the permissions behind our back.
1389 if (__mandatory_lock(inode))
1395 locks_delete_block(&fl);
1400 EXPORT_SYMBOL(locks_mandatory_area);
1401 #endif /* CONFIG_MANDATORY_FILE_LOCKING */
1403 static void lease_clear_pending(struct file_lock *fl, int arg)
1407 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1410 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1414 /* We already had a lease on this file; just change its type */
1415 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1417 int error = assign_type(fl, arg);
1421 lease_clear_pending(fl, arg);
1422 locks_wake_up_blocks(fl);
1423 if (arg == F_UNLCK) {
1424 struct file *filp = fl->fl_file;
1427 filp->f_owner.signum = 0;
1428 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1429 if (fl->fl_fasync != NULL) {
1430 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1431 fl->fl_fasync = NULL;
1433 locks_delete_lock_ctx(fl, dispose);
1437 EXPORT_SYMBOL(lease_modify);
1439 static bool past_time(unsigned long then)
1442 /* 0 is a special value meaning "this never expires": */
1444 return time_after(jiffies, then);
1447 static void time_out_leases(struct inode *inode, struct list_head *dispose)
1449 struct file_lock_context *ctx = inode->i_flctx;
1450 struct file_lock *fl, *tmp;
1452 lockdep_assert_held(&ctx->flc_lock);
1454 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1455 trace_time_out_leases(inode, fl);
1456 if (past_time(fl->fl_downgrade_time))
1457 lease_modify(fl, F_RDLCK, dispose);
1458 if (past_time(fl->fl_break_time))
1459 lease_modify(fl, F_UNLCK, dispose);
1463 static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1465 if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
1467 if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1469 return locks_conflict(breaker, lease);
1473 any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1475 struct file_lock_context *ctx = inode->i_flctx;
1476 struct file_lock *fl;
1478 lockdep_assert_held(&ctx->flc_lock);
1480 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1481 if (leases_conflict(fl, breaker))
1488 * __break_lease - revoke all outstanding leases on file
1489 * @inode: the inode of the file to return
1490 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1492 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
1495 * break_lease (inlined for speed) has checked there already is at least
1496 * some kind of lock (maybe a lease) on this file. Leases are broken on
1497 * a call to open() or truncate(). This function can sleep unless you
1498 * specified %O_NONBLOCK to your open().
1500 int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1503 struct file_lock_context *ctx;
1504 struct file_lock *new_fl, *fl, *tmp;
1505 unsigned long break_time;
1506 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1509 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1511 return PTR_ERR(new_fl);
1512 new_fl->fl_flags = type;
1514 /* typically we will check that ctx is non-NULL before calling */
1515 ctx = smp_load_acquire(&inode->i_flctx);
1521 percpu_down_read_preempt_disable(&file_rwsem);
1522 spin_lock(&ctx->flc_lock);
1524 time_out_leases(inode, &dispose);
1526 if (!any_leases_conflict(inode, new_fl))
1530 if (lease_break_time > 0) {
1531 break_time = jiffies + lease_break_time * HZ;
1532 if (break_time == 0)
1533 break_time++; /* so that 0 means no break time */
1536 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1537 if (!leases_conflict(fl, new_fl))
1540 if (fl->fl_flags & FL_UNLOCK_PENDING)
1542 fl->fl_flags |= FL_UNLOCK_PENDING;
1543 fl->fl_break_time = break_time;
1545 if (lease_breaking(fl))
1547 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1548 fl->fl_downgrade_time = break_time;
1550 if (fl->fl_lmops->lm_break(fl))
1551 locks_delete_lock_ctx(fl, &dispose);
1554 if (list_empty(&ctx->flc_lease))
1557 if (mode & O_NONBLOCK) {
1558 trace_break_lease_noblock(inode, new_fl);
1559 error = -EWOULDBLOCK;
1564 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1565 break_time = fl->fl_break_time;
1566 if (break_time != 0)
1567 break_time -= jiffies;
1568 if (break_time == 0)
1570 locks_insert_block(fl, new_fl);
1571 trace_break_lease_block(inode, new_fl);
1572 spin_unlock(&ctx->flc_lock);
1573 percpu_up_read_preempt_enable(&file_rwsem);
1575 locks_dispose_list(&dispose);
1576 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1577 !new_fl->fl_blocker, break_time);
1579 percpu_down_read_preempt_disable(&file_rwsem);
1580 spin_lock(&ctx->flc_lock);
1581 trace_break_lease_unblock(inode, new_fl);
1582 locks_delete_block(new_fl);
1585 * Wait for the next conflicting lease that has not been
1589 time_out_leases(inode, &dispose);
1590 if (any_leases_conflict(inode, new_fl))
1595 spin_unlock(&ctx->flc_lock);
1596 percpu_up_read_preempt_enable(&file_rwsem);
1597 locks_dispose_list(&dispose);
1598 locks_free_lock(new_fl);
1602 EXPORT_SYMBOL(__break_lease);
1605 * lease_get_mtime - update modified time of an inode with exclusive lease
1607 * @time: pointer to a timespec which contains the last modified time
1609 * This is to force NFS clients to flush their caches for files with
1610 * exclusive leases. The justification is that if someone has an
1611 * exclusive lease, then they could be modifying it.
1613 void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1615 bool has_lease = false;
1616 struct file_lock_context *ctx;
1617 struct file_lock *fl;
1619 ctx = smp_load_acquire(&inode->i_flctx);
1620 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1621 spin_lock(&ctx->flc_lock);
1622 fl = list_first_entry_or_null(&ctx->flc_lease,
1623 struct file_lock, fl_list);
1624 if (fl && (fl->fl_type == F_WRLCK))
1626 spin_unlock(&ctx->flc_lock);
1630 *time = current_time(inode);
1633 EXPORT_SYMBOL(lease_get_mtime);
1636 * fcntl_getlease - Enquire what lease is currently active
1639 * The value returned by this function will be one of
1640 * (if no lease break is pending):
1642 * %F_RDLCK to indicate a shared lease is held.
1644 * %F_WRLCK to indicate an exclusive lease is held.
1646 * %F_UNLCK to indicate no lease is held.
1648 * (if a lease break is pending):
1650 * %F_RDLCK to indicate an exclusive lease needs to be
1651 * changed to a shared lease (or removed).
1653 * %F_UNLCK to indicate the lease needs to be removed.
1655 * XXX: sfr & willy disagree over whether F_INPROGRESS
1656 * should be returned to userspace.
1658 int fcntl_getlease(struct file *filp)
1660 struct file_lock *fl;
1661 struct inode *inode = locks_inode(filp);
1662 struct file_lock_context *ctx;
1666 ctx = smp_load_acquire(&inode->i_flctx);
1667 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1668 percpu_down_read_preempt_disable(&file_rwsem);
1669 spin_lock(&ctx->flc_lock);
1670 time_out_leases(inode, &dispose);
1671 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1672 if (fl->fl_file != filp)
1674 type = target_leasetype(fl);
1677 spin_unlock(&ctx->flc_lock);
1678 percpu_up_read_preempt_enable(&file_rwsem);
1680 locks_dispose_list(&dispose);
1686 * check_conflicting_open - see if the given dentry points to a file that has
1687 * an existing open that would conflict with the
1689 * @dentry: dentry to check
1690 * @arg: type of lease that we're trying to acquire
1691 * @flags: current lock flags
1693 * Check to see if there's an existing open fd on this file that would
1694 * conflict with the lease we're trying to set.
1697 check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
1700 struct inode *inode = dentry->d_inode;
1702 if (flags & FL_LAYOUT)
1705 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1708 if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1709 (atomic_read(&inode->i_count) > 1)))
1716 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1718 struct file_lock *fl, *my_fl = NULL, *lease;
1719 struct dentry *dentry = filp->f_path.dentry;
1720 struct inode *inode = dentry->d_inode;
1721 struct file_lock_context *ctx;
1722 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1727 trace_generic_add_lease(inode, lease);
1729 /* Note that arg is never F_UNLCK here */
1730 ctx = locks_get_lock_context(inode, arg);
1735 * In the delegation case we need mutual exclusion with
1736 * a number of operations that take the i_mutex. We trylock
1737 * because delegations are an optional optimization, and if
1738 * there's some chance of a conflict--we'd rather not
1739 * bother, maybe that's a sign this just isn't a good file to
1740 * hand out a delegation on.
1742 if (is_deleg && !inode_trylock(inode))
1745 if (is_deleg && arg == F_WRLCK) {
1746 /* Write delegations are not currently supported: */
1747 inode_unlock(inode);
1752 percpu_down_read_preempt_disable(&file_rwsem);
1753 spin_lock(&ctx->flc_lock);
1754 time_out_leases(inode, &dispose);
1755 error = check_conflicting_open(dentry, arg, lease->fl_flags);
1760 * At this point, we know that if there is an exclusive
1761 * lease on this file, then we hold it on this filp
1762 * (otherwise our open of this file would have blocked).
1763 * And if we are trying to acquire an exclusive lease,
1764 * then the file is not open by anyone (including us)
1765 * except for this filp.
1768 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1769 if (fl->fl_file == filp &&
1770 fl->fl_owner == lease->fl_owner) {
1776 * No exclusive leases if someone else has a lease on
1782 * Modifying our existing lease is OK, but no getting a
1783 * new lease if someone else is opening for write:
1785 if (fl->fl_flags & FL_UNLOCK_PENDING)
1789 if (my_fl != NULL) {
1791 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1801 locks_insert_lock_ctx(lease, &ctx->flc_lease);
1803 * The check in break_lease() is lockless. It's possible for another
1804 * open to race in after we did the earlier check for a conflicting
1805 * open but before the lease was inserted. Check again for a
1806 * conflicting open and cancel the lease if there is one.
1808 * We also add a barrier here to ensure that the insertion of the lock
1809 * precedes these checks.
1812 error = check_conflicting_open(dentry, arg, lease->fl_flags);
1814 locks_unlink_lock_ctx(lease);
1819 if (lease->fl_lmops->lm_setup)
1820 lease->fl_lmops->lm_setup(lease, priv);
1822 spin_unlock(&ctx->flc_lock);
1823 percpu_up_read_preempt_enable(&file_rwsem);
1824 locks_dispose_list(&dispose);
1826 inode_unlock(inode);
1827 if (!error && !my_fl)
1832 static int generic_delete_lease(struct file *filp, void *owner)
1834 int error = -EAGAIN;
1835 struct file_lock *fl, *victim = NULL;
1836 struct inode *inode = locks_inode(filp);
1837 struct file_lock_context *ctx;
1840 ctx = smp_load_acquire(&inode->i_flctx);
1842 trace_generic_delete_lease(inode, NULL);
1846 percpu_down_read_preempt_disable(&file_rwsem);
1847 spin_lock(&ctx->flc_lock);
1848 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1849 if (fl->fl_file == filp &&
1850 fl->fl_owner == owner) {
1855 trace_generic_delete_lease(inode, victim);
1857 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1858 spin_unlock(&ctx->flc_lock);
1859 percpu_up_read_preempt_enable(&file_rwsem);
1860 locks_dispose_list(&dispose);
1865 * generic_setlease - sets a lease on an open file
1866 * @filp: file pointer
1867 * @arg: type of lease to obtain
1868 * @flp: input - file_lock to use, output - file_lock inserted
1869 * @priv: private data for lm_setup (may be NULL if lm_setup
1870 * doesn't require it)
1872 * The (input) flp->fl_lmops->lm_break function is required
1875 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1878 struct inode *inode = locks_inode(filp);
1881 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1883 if (!S_ISREG(inode->i_mode))
1885 error = security_file_lock(filp, arg);
1891 return generic_delete_lease(filp, *priv);
1894 if (!(*flp)->fl_lmops->lm_break) {
1899 return generic_add_lease(filp, arg, flp, priv);
1904 EXPORT_SYMBOL(generic_setlease);
1907 * vfs_setlease - sets a lease on an open file
1908 * @filp: file pointer
1909 * @arg: type of lease to obtain
1910 * @lease: file_lock to use when adding a lease
1911 * @priv: private info for lm_setup when adding a lease (may be
1912 * NULL if lm_setup doesn't require it)
1914 * Call this to establish a lease on the file. The "lease" argument is not
1915 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1916 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1917 * set; if not, this function will return -ENOLCK (and generate a scary-looking
1920 * The "priv" pointer is passed directly to the lm_setup function as-is. It
1921 * may be NULL if the lm_setup operation doesn't require it.
1924 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1926 if (filp->f_op->setlease)
1927 return filp->f_op->setlease(filp, arg, lease, priv);
1929 return generic_setlease(filp, arg, lease, priv);
1931 EXPORT_SYMBOL_GPL(vfs_setlease);
1933 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1935 struct file_lock *fl;
1936 struct fasync_struct *new;
1939 fl = lease_alloc(filp, arg);
1943 new = fasync_alloc();
1945 locks_free_lock(fl);
1950 error = vfs_setlease(filp, arg, &fl, (void **)&new);
1952 locks_free_lock(fl);
1959 * fcntl_setlease - sets a lease on an open file
1960 * @fd: open file descriptor
1961 * @filp: file pointer
1962 * @arg: type of lease to obtain
1964 * Call this fcntl to establish a lease on the file.
1965 * Note that you also need to call %F_SETSIG to
1966 * receive a signal when the lease is broken.
1968 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1971 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1972 return do_fcntl_add_lease(fd, filp, arg);
1976 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
1977 * @inode: inode of the file to apply to
1978 * @fl: The lock to be applied
1980 * Apply a FLOCK style lock request to an inode.
1982 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1987 error = flock_lock_inode(inode, fl);
1988 if (error != FILE_LOCK_DEFERRED)
1990 error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
1994 locks_delete_block(fl);
1999 * locks_lock_inode_wait - Apply a lock to an inode
2000 * @inode: inode of the file to apply to
2001 * @fl: The lock to be applied
2003 * Apply a POSIX or FLOCK style lock request to an inode.
2005 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2008 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2010 res = posix_lock_inode_wait(inode, fl);
2013 res = flock_lock_inode_wait(inode, fl);
2020 EXPORT_SYMBOL(locks_lock_inode_wait);
2023 * sys_flock: - flock() system call.
2024 * @fd: the file descriptor to lock.
2025 * @cmd: the type of lock to apply.
2027 * Apply a %FL_FLOCK style lock to an open file descriptor.
2028 * The @cmd can be one of:
2030 * - %LOCK_SH -- a shared lock.
2031 * - %LOCK_EX -- an exclusive lock.
2032 * - %LOCK_UN -- remove an existing lock.
2033 * - %LOCK_MAND -- a 'mandatory' flock.
2034 * This exists to emulate Windows Share Modes.
2036 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
2037 * processes read and write access respectively.
2039 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2041 struct fd f = fdget(fd);
2042 struct file_lock *lock;
2043 int can_sleep, unlock;
2050 can_sleep = !(cmd & LOCK_NB);
2052 unlock = (cmd == LOCK_UN);
2054 if (!unlock && !(cmd & LOCK_MAND) &&
2055 !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
2058 lock = flock_make_lock(f.file, cmd, NULL);
2060 error = PTR_ERR(lock);
2065 lock->fl_flags |= FL_SLEEP;
2067 error = security_file_lock(f.file, lock->fl_type);
2071 if (f.file->f_op->flock)
2072 error = f.file->f_op->flock(f.file,
2073 (can_sleep) ? F_SETLKW : F_SETLK,
2076 error = locks_lock_file_wait(f.file, lock);
2079 locks_free_lock(lock);
2088 * vfs_test_lock - test file byte range lock
2089 * @filp: The file to test lock for
2090 * @fl: The lock to test; also used to hold result
2092 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
2093 * setting conf->fl_type to something other than F_UNLCK.
2095 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2097 if (filp->f_op->lock)
2098 return filp->f_op->lock(filp, F_GETLK, fl);
2099 posix_test_lock(filp, fl);
2102 EXPORT_SYMBOL_GPL(vfs_test_lock);
2105 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2106 * @fl: The file_lock who's fl_pid should be translated
2107 * @ns: The namespace into which the pid should be translated
2109 * Used to tranlate a fl_pid into a namespace virtual pid number
2111 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2118 if (IS_REMOTELCK(fl))
2121 * If the flock owner process is dead and its pid has been already
2122 * freed, the translation below won't work, but we still want to show
2123 * flock owner pid number in init pidns.
2125 if (ns == &init_pid_ns)
2126 return (pid_t)fl->fl_pid;
2129 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2130 vnr = pid_nr_ns(pid, ns);
2135 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2137 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2138 #if BITS_PER_LONG == 32
2140 * Make sure we can represent the posix lock via
2141 * legacy 32bit flock.
2143 if (fl->fl_start > OFFT_OFFSET_MAX)
2145 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2148 flock->l_start = fl->fl_start;
2149 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2150 fl->fl_end - fl->fl_start + 1;
2151 flock->l_whence = 0;
2152 flock->l_type = fl->fl_type;
2156 #if BITS_PER_LONG == 32
2157 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2159 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2160 flock->l_start = fl->fl_start;
2161 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2162 fl->fl_end - fl->fl_start + 1;
2163 flock->l_whence = 0;
2164 flock->l_type = fl->fl_type;
2168 /* Report the first existing lock that would conflict with l.
2169 * This implements the F_GETLK command of fcntl().
2171 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2173 struct file_lock *fl;
2176 fl = locks_alloc_lock();
2180 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2183 error = flock_to_posix_lock(filp, fl, flock);
2187 if (cmd == F_OFD_GETLK) {
2189 if (flock->l_pid != 0)
2193 fl->fl_flags |= FL_OFDLCK;
2194 fl->fl_owner = filp;
2197 error = vfs_test_lock(filp, fl);
2201 flock->l_type = fl->fl_type;
2202 if (fl->fl_type != F_UNLCK) {
2203 error = posix_lock_to_flock(flock, fl);
2208 locks_free_lock(fl);

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 *
 * If the request is for a non-blocking lock, the filesystem should return
 * FILE_LOCK_DEFERRED immediately, then try to get the lock and call the
 * callback routine with the result. If the request timed out, the callback
 * routine will return a nonzero return code and the filesystem should release
 * the lock. The filesystem is also responsible for keeping a corresponding
 * posix lock when it grants a lock, so the VFS can find out which locks are
 * locally held and do the correct lock cleanup when required.
 *
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
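
/*
 * A minimal sketch of the deferred ->lock() contract documented above,
 * using assumed helper names (myfs_queue_lock, myfs_lock_done); real
 * implementations live in the filesystems themselves:
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		myfs_queue_lock(filp, fl);	(assumed async helper)
 *		return FILE_LOCK_DEFERRED;
 *	}
 *
 *	(later, when the remote request completes with "result":)
 *	static void myfs_lock_done(struct file_lock *fl, int result)
 *	{
 *		fl->fl_lmops->lm_grant(fl, result);
 *	}
 */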

static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
		if (error)
			break;
	}
	locks_delete_block(fl);

	return error;
}

/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}
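
/*
 * Userspace view (illustrative only): a descriptor opened O_WRONLY cannot
 * carry a read lock, so the request below fails with EBADF, matching the
 * check above:
 *
 *	int fd = open(path, O_WRONLY);
 *	struct flock fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLK, &fl);	(fails, errno == EBADF)
 */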

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}
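
/*
 * Illustrative userspace sketch: taking an OFD (open file description)
 * write lock. l_pid must be zero; the lock is owned by the open file
 * description rather than the process, so it is only released when the
 * last descriptor referring to that description is closed:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0, .l_pid = 0,
 *	};
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		;	(handle EAGAIN on conflict, or other errors)
 */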

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock64_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
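
/*
 * Illustrative consequence for traditional (non-OFD) POSIX locks: since
 * the owner is the process, closing any descriptor for the file drops
 * the process's locks on it, e.g.:
 *
 *	fd1 = open(path, O_RDWR);	(then F_SETLK a region via fd1)
 *	fd2 = open(path, O_RDONLY);
 *	close(fd2);			(the lock taken via fd1 is gone)
 *
 * OFD locks (F_OFD_SETLK) are owned by the open file description and do
 * not behave this way.
 */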

/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	flock_make_lock(filp, LOCK_UN, &fl);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
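
/*
 * Illustrative userspace view: flock() locks belong to the open file
 * description, so the teardown above only runs at last close:
 *
 *	flock(fd, LOCK_EX);
 *	fd2 = dup(fd);
 *	close(fd);	(lock still held through fd2)
 *	close(fd2);	(now released, via this function)
 */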

/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);

	locks_dispose_list(&dispose);
}
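
/*
 * Equivalent userspace effect (illustrative): any lease still held at
 * final close is torn down as if the owner had called
 *
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */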

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);

	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file_lock *waiter)
{
	int status = -ENOENT;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker) {
		__locks_wake_up_blocks(waiter);
		__locks_delete_block(waiter);
		status = 0;
	}
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
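
/*
 * Sketch of the intended use by a lock manager (assumed shape): after
 * queueing a blocking wait, the manager can abandon it on cancel or
 * timeout, and must handle losing the race with the grant path:
 *
 *	status = posix_unblock_lock(&waiter);
 *	if (status == -ENOENT)
 *		;	(too late: the wait already completed; undo the
 *			 granted lock instead of cancelling the wait)
 */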

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};

static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	fl_pid = locks_translate_pid(fl, proc_pidns);
	/*
	 * If lock owner is dead (and pid is freed) or not visible in current
	 * pidns, zero is shown as a pid value. Check lock info from
	 * init_pid_ns to get saved lock pid value.
	 */

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_puts(f, "FLOCK  MSNFS     ");
		} else {
			seq_puts(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
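
/*
 * The resulting /proc/locks lines look like this (illustrative values):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:5679 0 EOF
 *	3: LEASE  ACTIVE    READ  1236 08:01:567a 0 EOF
 *
 * i.e. ordinal, lock class, mandatory/advisory, type, pid,
 * major:minor:inode of the locked file, and the byte range.
 */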

static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	fl = hlist_entry(v, struct file_lock, fl_link);

	if (locks_translate_pid(fl, proc_pidns) == 0)
		return 0;

	lock_get_status(f, fl, iter->li_pos, "");

	list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}

static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *fl;

	list_for_each_entry(fl, head, fl_list) {

		if (filp != fl->fl_file)
			continue;
		if (fl->fl_owner != files &&
		    fl->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, fl, *id, "");
	}
}

void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
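
/*
 * These entries surface in /proc/<pid>/fdinfo/<fd> with a "lock:" prefix,
 * e.g. (illustrative):
 *
 *	lock:	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 */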

static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}

static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}

static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
#endif /* CONFIG_PROC_FS */

static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	return 0;
}
core_initcall(filelock_init);