diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 9e2125ae10a5f4e8914bf073d38ab5527589ad6f..7fde88695f35d62c1f22ffe1cc724ffe85ea92e9 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
 /* global SRCU for all MMs */
 DEFINE_STATIC_SRCU(srcu);
 
+#ifdef CONFIG_LOCKDEP
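+/*
+ * Fake lockdep map, "held" around every invalidate_range_start/end
+ * pair so lockdep can flag notifier callbacks that take locks which
+ * are also held while invalidating mappings.
+ */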
+struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
+       .name = "mmu_notifier_invalidate_range_start"
+};
+#endif
+
 /*
  * This function can't run concurrently against mmu_notifier_register
  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
@@ -162,7 +168,13 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_start) {
-                       int _ret = mn->ops->invalidate_range_start(mn, range);
+                       int _ret;
+
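+                       /*
+                        * Non-blockable invalidations (e.g. from the
+                        * oom reaper) must not sleep in the callback;
+                        * non_block_{start,end}() make any attempt to
+                        * sleep in between trip a might_sleep() splat.
+                        */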
+                       if (!mmu_notifier_range_blockable(range))
+                               non_block_start();
+                       _ret = mn->ops->invalidate_range_start(mn, range);
+                       if (!mmu_notifier_range_blockable(range))
+                               non_block_end();
                        if (_ret) {
                                pr_info("%pS callback failed with %d in %sblockable context.\n",
                                        mn->ops->invalidate_range_start, _ret,
@@ -184,6 +196,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
        struct mmu_notifier *mn;
        int id;
 
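+       /*
+        * The end side is annotated with the same fake lock as the start
+        * side, so locks taken from invalidate_range_end callbacks are
+        * tracked against the invalidation critical section as well.
+        */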
+       lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
                /*
@@ -203,10 +216,16 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
                        mn->ops->invalidate_range(mn, range->mm,
                                                  range->start,
                                                  range->end);
-               if (mn->ops->invalidate_range_end)
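+               /* The same non-blocking constraint holds for range_end. */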
+               if (mn->ops->invalidate_range_end) {
+                       if (!mmu_notifier_range_blockable(range))
+                               non_block_start();
                        mn->ops->invalidate_range_end(mn, range);
+                       if (!mmu_notifier_range_blockable(range))
+                               non_block_end();
+               }
        }
        srcu_read_unlock(&srcu, id);
+       lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
 
 void __mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -235,6 +254,13 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
        lockdep_assert_held_write(&mm->mmap_sem);
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
+       if (IS_ENABLED(CONFIG_LOCKDEP)) {
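+               /*
+                * Prime lockdep at register time: record that the
+                * invalidate map nests inside fs_reclaim, so a callback
+                * doing a GFP_KERNEL allocation (which may recurse into
+                * reclaim and back into invalidation) is flagged
+                * immediately instead of only when reclaim actually
+                * triggers an invalidation.
+                */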
+               fs_reclaim_acquire(GFP_KERNEL);
+               lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+               lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+               fs_reclaim_release(GFP_KERNEL);
+       }
+
        mn->mm = mm;
        mn->users = 1;