diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bad96b476eb6eb13c1d7f1774a1348506d0e5f10..df3008419a1d0a34229580d86a2aec2f0e305d2a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * jump label support
  *
@@ -36,12 +37,26 @@ static int jump_label_cmp(const void *a, const void *b)
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;
 
+       /*
+        * Entries are sorted by key.
+        */
        if (jump_entry_key(jea) < jump_entry_key(jeb))
                return -1;
 
        if (jump_entry_key(jea) > jump_entry_key(jeb))
                return 1;
 
+       /*
+        * In batch mode, entries that share a key are additionally
+        * sorted by their code address, enabling a bsearch in the
+        * queued vector.
+        */
+       if (jump_entry_code(jea) < jump_entry_code(jeb))
+               return -1;
+
+       if (jump_entry_code(jea) > jump_entry_code(jeb))
+               return 1;
+
        return 0;
 }
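
[Editor's note: for intuition, the two-level ordering above can be modeled in
plain userspace C: once a vector is sorted by (key, code), bsearch(3) can
locate an entry directly. This is a standalone sketch, not kernel code; the
struct and the values are made up.]

#include <stdio.h>
#include <stdlib.h>

struct entry {
        unsigned long key;      /* stands in for jump_entry_key()  */
        unsigned long code;     /* stands in for jump_entry_code() */
};

/* Same shape as jump_label_cmp(): order by key, then by code. */
static int entry_cmp(const void *a, const void *b)
{
        const struct entry *ea = a, *eb = b;

        if (ea->key != eb->key)
                return ea->key < eb->key ? -1 : 1;
        if (ea->code != eb->code)
                return ea->code < eb->code ? -1 : 1;
        return 0;
}

int main(void)
{
        struct entry vec[] = {
                { 0x2000, 0x40 }, { 0x1000, 0x30 },
                { 0x2000, 0x10 }, { 0x1000, 0x20 },
        };
        size_t nr = sizeof(vec) / sizeof(vec[0]);
        struct entry target = { 0x2000, 0x10 };
        struct entry *hit;

        qsort(vec, nr, sizeof(vec[0]), entry_cmp);
        hit = bsearch(&target, vec, nr, sizeof(vec[0]), entry_cmp);
        if (hit)
                printf("found key=%#lx code=%#lx\n", hit->key, hit->code);
        return 0;
}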
 
@@ -202,11 +217,13 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
-                                          unsigned long rate_limit,
-                                          struct delayed_work *work)
+static bool static_key_slow_try_dec(struct static_key *key)
 {
-       lockdep_assert_cpus_held();
+       int val;
+
+       val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+       if (val == 1)
+               return false;
 
        /*
         * The negative count check is valid even when a negative
@@ -215,63 +232,70 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
-       if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
-               WARN(atomic_read(&key->enabled) < 0,
-                    "jump label: negative count!\n");
+       WARN(val < 0, "jump label: negative count!\n");
+       return true;
+}
+
+static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+       lockdep_assert_cpus_held();
+
+       if (static_key_slow_try_dec(key))
                return;
-       }
 
-       if (rate_limit) {
-               atomic_inc(&key->enabled);
-               schedule_delayed_work(work, rate_limit);
-       } else {
+       jump_label_lock();
+       if (atomic_dec_and_test(&key->enabled))
                jump_label_update(key);
-       }
        jump_label_unlock();
 }
 
-static void __static_key_slow_dec(struct static_key *key,
-                                 unsigned long rate_limit,
-                                 struct delayed_work *work)
+static void __static_key_slow_dec(struct static_key *key)
 {
        cpus_read_lock();
-       __static_key_slow_dec_cpuslocked(key, rate_limit, work);
+       __static_key_slow_dec_cpuslocked(key);
        cpus_read_unlock();
 }
 
-static void jump_label_update_timeout(struct work_struct *work)
+void jump_label_update_timeout(struct work_struct *work)
 {
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
-       __static_key_slow_dec(&key->key, 0, NULL);
+       __static_key_slow_dec(&key->key);
 }
+EXPORT_SYMBOL_GPL(jump_label_update_timeout);
 
 void static_key_slow_dec(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec(key, 0, NULL);
+       __static_key_slow_dec(key);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_cpuslocked(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec_cpuslocked(key, 0, NULL);
+       __static_key_slow_dec_cpuslocked(key);
 }
 
-void static_key_slow_dec_deferred(struct static_key_deferred *key)
+void __static_key_slow_dec_deferred(struct static_key *key,
+                                   struct delayed_work *work,
+                                   unsigned long timeout)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec(&key->key, key->timeout, &key->work);
+
+       if (static_key_slow_try_dec(key))
+               return;
+
+       schedule_delayed_work(work, timeout);
 }
-EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
 
-void static_key_deferred_flush(struct static_key_deferred *key)
+void __static_key_deferred_flush(void *key, struct delayed_work *work)
 {
        STATIC_KEY_CHECK_USE(key);
-       flush_delayed_work(&key->work);
+       flush_delayed_work(work);
 }
-EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
 
 void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
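
[Editor's note: the hunk above factors the lockless fast path into
static_key_slow_try_dec(), which uses atomic_fetch_add_unless(&key->enabled,
-1, 1), i.e. it decrements the count unless it is exactly 1; the 1 -> 0
transition is left to the locked slow path that actually patches code. The
old struct-taking entry points are replaced by __-prefixed functions, and
callers keep the old API through small inline wrappers in
include/linux/jump_label_ratelimit.h. Those wrappers are outside this diff,
so the bodies below are an assumption about their shape, not quoted code.]

/* Assumed header-side wrappers preserving the old deferred-key API. */
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        __static_key_slow_dec_deferred(&key->key, &key->work, key->timeout);
}

static inline void static_key_deferred_flush(struct static_key_deferred *key)
{
        __static_key_deferred_flush(key, &key->work);
}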
@@ -374,25 +398,55 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
        return enabled ^ branch;
 }
 
+static bool jump_label_can_update(struct jump_entry *entry, bool init)
+{
+       /*
+        * Cannot update code that was in an init text area.
+        */
+       if (!init && jump_entry_is_init(entry))
+               return false;
+
+       if (!kernel_text_address(jump_entry_code(entry))) {
+               WARN_ONCE(1, "can't patch jump_label at %pS", (void *)jump_entry_code(entry));
+               return false;
+       }
+
+       return true;
+}
+
+#ifndef HAVE_JUMP_LABEL_BATCH
 static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
 {
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
-               /*
-                * An entry->code of 0 indicates an entry which has been
-                * disabled because it was in an init text area.
-                */
-               if (init || !jump_entry_is_init(entry)) {
-                       if (kernel_text_address(jump_entry_code(entry)))
-                               arch_jump_label_transform(entry, jump_label_type(entry));
-                       else
-                               WARN_ONCE(1, "can't patch jump_label at %pS",
-                                         (void *)jump_entry_code(entry));
+               if (jump_label_can_update(entry, init))
+                       arch_jump_label_transform(entry, jump_label_type(entry));
+       }
+}
+#else
+static void __jump_label_update(struct static_key *key,
+                               struct jump_entry *entry,
+                               struct jump_entry *stop,
+                               bool init)
+{
+       for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
+
+               if (!jump_label_can_update(entry, init))
+                       continue;
+
+               if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
+                       /*
+                        * Queue is full: Apply the current queue and try again.
+                        */
+                       arch_jump_label_transform_apply();
+                       BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
                }
        }
+       arch_jump_label_transform_apply();
 }
+#endif
 
 void __init jump_label_init(void)
 {
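
[Editor's note: the batch variant of __jump_label_update() follows a
queue-and-apply protocol: arch_jump_label_transform_queue() returns false
when the arch's vector is full, the generic code then applies the pending
queue and retries, and a final arch_jump_label_transform_apply() drains the
tail. A standalone model of that control flow, not kernel code; names and
the capacity are made up:]

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_CAP 4

static int queued[QUEUE_CAP];
static int nr_queued;

static bool queue_site(int site)
{
        if (nr_queued == QUEUE_CAP)
                return false;           /* full: caller must apply first */
        queued[nr_queued++] = site;
        return true;
}

static void apply_queue(void)
{
        for (int i = 0; i < nr_queued; i++)
                printf("patching site %d\n", queued[i]);
        nr_queued = 0;                  /* one expensive sync, many sites */
}

int main(void)
{
        for (int site = 0; site < 10; site++) {
                if (!queue_site(site)) {
                        apply_queue();          /* drain the full queue */
                        queue_site(site);       /* retry; queue is now empty */
                }
        }
        apply_queue();                          /* flush the tail */
        return 0;
}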