Merge tag 'trace-v5.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Mar 2021 17:14:55 +0000 (10:14 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Mar 2021 17:14:55 +0000 (10:14 -0700)
Pull ftrace fix from Steven Rostedt:
 "Add check of order < 0 before calling free_pages()

  The function addresses that are traced by ftrace are stored in pages,
  and the size is held in a variable. If there's some error in creating
  them, the allocated ones will be freed. In this case, it is possible
  that the order of pages to be freed may end up being negative due to a
  size of zero passed to get_count_order(), and then that negative
  number will cause free_pages() to free a very large section.

  Make sure that does not happen"

* tag 'trace-v5.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Check if pages were allocated before calling free_pages()

1  2 
kernel/trace/ftrace.c

diff --combined kernel/trace/ftrace.c
index b7e29db127fa2830b28b79e3189505286c20903c,12223132eff4d7959344e948f959f5ecf2b80944..3ba52d4e1314228147112047aa497709dbe6ba59
@@@ -3231,7 -3231,8 +3231,8 @@@ ftrace_allocate_pages(unsigned long num
        pg = start_pg;
        while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
@@@ -5045,20 -5046,6 +5046,20 @@@ struct ftrace_direct_func *ftrace_find_
        return NULL;
  }
  
 +static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
 +{
 +      struct ftrace_direct_func *direct;
 +
 +      direct = kmalloc(sizeof(*direct), GFP_KERNEL);
 +      if (!direct)
 +              return NULL;
 +      direct->addr = addr;
 +      direct->count = 0;
 +      list_add_rcu(&direct->next, &ftrace_direct_funcs);
 +      ftrace_direct_func_count++;
 +      return direct;
 +}
 +
  /**
   * register_ftrace_direct - Call a custom trampoline directly
   * @ip: The address of the nop at the beginning of a function
@@@ -5134,11 -5121,15 +5135,11 @@@ int register_ftrace_direct(unsigned lon
  
        direct = ftrace_find_direct_func(addr);
        if (!direct) {
 -              direct = kmalloc(sizeof(*direct), GFP_KERNEL);
 +              direct = ftrace_alloc_direct_func(addr);
                if (!direct) {
                        kfree(entry);
                        goto out_unlock;
                }
 -              direct->addr = addr;
 -              direct->count = 0;
 -              list_add_rcu(&direct->next, &ftrace_direct_funcs);
 -              ftrace_direct_func_count++;
        }
  
        entry->ip = ip;
@@@ -5339,7 -5330,6 +5340,7 @@@ int __weak ftrace_modify_direct_caller(
  int modify_ftrace_direct(unsigned long ip,
                         unsigned long old_addr, unsigned long new_addr)
  {
 +      struct ftrace_direct_func *direct, *new_direct = NULL;
        struct ftrace_func_entry *entry;
        struct dyn_ftrace *rec;
        int ret = -ENODEV;
        if (entry->direct != old_addr)
                goto out_unlock;
  
 +      direct = ftrace_find_direct_func(old_addr);
 +      if (WARN_ON(!direct))
 +              goto out_unlock;
 +      if (direct->count > 1) {
 +              ret = -ENOMEM;
 +              new_direct = ftrace_alloc_direct_func(new_addr);
 +              if (!new_direct)
 +                      goto out_unlock;
 +              direct->count--;
 +              new_direct->count++;
 +      } else {
 +              direct->addr = new_addr;
 +      }
 +
        /*
         * If there's no other ftrace callback on the rec->ip location,
         * then it can be changed directly by the architecture.
                ret = 0;
        }
  
 +      if (unlikely(ret && new_direct)) {
 +              direct->count++;
 +              list_del_rcu(&new_direct->next);
 +              synchronize_rcu_tasks();
 +              kfree(new_direct);
 +              ftrace_direct_func_count--;
 +      }
 +
   out_unlock:
        mutex_unlock(&ftrace_lock);
        mutex_unlock(&direct_mutex);
@@@ -6451,7 -6419,8 +6452,8 @@@ void ftrace_release_mod(struct module *
                clear_mod_from_hashes(pg);
  
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                tmp_page = pg->next;
                kfree(pg);
                ftrace_number_of_pages -= 1 << order;
@@@ -6811,7 -6780,8 +6813,8 @@@ void ftrace_free_mem(struct module *mod
                if (!pg->index) {
                        *last_pg = pg->next;
                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                       free_pages((unsigned long)pg->records, order);
+                       if (order >= 0)
+                               free_pages((unsigned long)pg->records, order);
                        ftrace_number_of_pages -= 1 << order;
                        ftrace_number_of_groups--;
                        kfree(pg);