Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Oct 2018 14:00:03 +0000 (15:00 +0100)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Oct 2018 14:00:03 +0000 (15:00 +0100)
Pull scheduler updates from Ingo Molnar:
 "The main changes are:

   - Migrate CPU-intense 'misfit' tasks on asymmetric capacity systems,
     to better utilize (much) faster 'big core' CPUs. (Morten Rasmussen,
     Valentin Schneider)

   - Topology handling improvements, in particular when CPU capacity
     changes and related load-balancing fixes/improvements (Morten
     Rasmussen)

   - ... plus misc other improvements, fixes and updates"
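
The misfit-task migration mentioned in the first bullet boils down to the capacity check introduced below in kernel/sched/fair.c (task_fits_capacity() / update_misfit_status()): a task is a "misfit" on its current CPU when its estimated utilization, inflated by capacity_margin (1280 in fair.c at the time, i.e. roughly 20% headroom), no longer fits that CPU's capacity, and the load balancer is then nudged to move it to a higher-capacity CPU. Below is a minimal stand-alone C sketch of just that check; the helper names mirror the kernel ones but are plain userspace illustrations rather than kernel API, and the capacity/utilization numbers are made up for the example.

  #include <stdbool.h>
  #include <stdio.h>

  #define SCHED_CAPACITY_SCALE    1024UL
  /* ~20% headroom; mirrors capacity_margin in fair.c at this point in time */
  static const unsigned long capacity_margin = 1280;

  /* Mirrors the kernel check: cpu_cap * 1024 > task_util * capacity_margin */
  static bool task_fits_capacity(unsigned long task_util, unsigned long cpu_cap)
  {
          return cpu_cap * SCHED_CAPACITY_SCALE > task_util * capacity_margin;
  }

  int main(void)
  {
          unsigned long little_cap = 430;   /* illustrative LITTLE-core capacity */
          unsigned long big_cap    = 1024;  /* big-core capacity */
          unsigned long task_util  = 400;   /* a busy, CPU-bound task */

          /* 430*1024 = 440320 is not > 400*1280 = 512000 -> misfit on the LITTLE CPU */
          printf("fits LITTLE: %d\n", task_fits_capacity(task_util, little_cap));
          /* 1024*1024 = 1048576 > 512000 -> fits on the big CPU */
          printf("fits big:    %d\n", task_fits_capacity(task_util, big_cap));
          return 0;
  }

When the check fails, update_misfit_status() records the task's load in rq->misfit_task_load, which the patches below then use to kick nohz balancing and to let find_busiest_queue() pick the CPU carrying the biggest misfit task.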

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (28 commits)
  sched/completions/Documentation: Add recommendation for dynamic and ONSTACK completions
  sched/completions/Documentation: Clean up the document some more
  sched/completions/Documentation: Fix a couple of punctuation nits
  cpu/SMT: State SMT is disabled even with nosmt and without "=force"
  sched/core: Fix comment regarding nr_iowait_cpu() and get_iowait_load()
  sched/fair: Remove setting task's se->runnable_weight during PELT update
  sched/fair: Disable LB_BIAS by default
  sched/pelt: Fix warning and clean up IRQ PELT config
  sched/topology: Make local variables static
  sched/debug: Use symbolic names for task state constants
  sched/numa: Remove unused numa_stats::nr_running field
  sched/numa: Remove unused code from update_numa_stats()
  sched/debug: Explicitly cast sched_feat() to bool
  sched/core: Disable SD_PREFER_SIBLING on asymmetric CPU capacity domains
  sched/fair: Don't move tasks to lower capacity CPUs unless necessary
  sched/fair: Set rq->rd->overload when misfit
  sched/fair: Wrap rq->rd->overload accesses with READ/WRITE_ONCE()
  sched/core: Change root_domain->overload type to int
  sched/fair: Change 'prefer_sibling' type to bool
  sched/fair: Kick nohz balance if rq->misfit_task_load
  ...

kernel/cpu.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --combined kernel/cpu.c
index be4859f0715313000b18e81c208029c1e4e0b93b,f1338452d9988ffff9b625f5043b106c322d229d..e82920b8bee14e62be2bb248d92413a85aff4aa9
@@@ -315,16 -315,6 +315,16 @@@ void lockdep_assert_cpus_held(void
        percpu_rwsem_assert_held(&cpu_hotplug_lock);
  }
  
 +static void lockdep_acquire_cpus_lock(void)
 +{
 +      rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
 +}
 +
 +static void lockdep_release_cpus_lock(void)
 +{
 +      rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
 +}
 +
  /*
   * Wait for currently running CPU hotplug operations to complete (if any) and
   * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@@ -354,17 -344,6 +354,17 @@@ void cpu_hotplug_enable(void
        cpu_maps_update_done();
  }
  EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 +
 +#else
 +
 +static void lockdep_acquire_cpus_lock(void)
 +{
 +}
 +
 +static void lockdep_release_cpus_lock(void)
 +{
 +}
 +
  #endif        /* CONFIG_HOTPLUG_CPU */
  
  #ifdef CONFIG_HOTPLUG_SMT
@@@ -383,6 -362,7 +383,7 @@@ void __init cpu_smt_disable(bool force
                pr_info("SMT: Force disabled\n");
                cpu_smt_control = CPU_SMT_FORCE_DISABLED;
        } else {
+               pr_info("SMT: disabled\n");
                cpu_smt_control = CPU_SMT_DISABLED;
        }
  }
@@@ -637,12 -617,6 +638,12 @@@ static void cpuhp_thread_fun(unsigned i
         */
        smp_mb();
  
 +      /*
 +       * The BP holds the hotplug lock, but we're now running on the AP;
 +       * ensure that anybody asserting that the lock is held will actually
 +       * find it so.
 +       */
 +      lockdep_acquire_cpus_lock();
        cpuhp_lock_acquire(bringup);
  
        if (st->single) {
        }
  
        cpuhp_lock_release(bringup);
 +      lockdep_release_cpus_lock();
  
        if (!st->should_run)
                complete_ap_thread(st, bringup);
diff --combined kernel/sched/fair.c
index 908c9cdae2f0c05d4a4956844249da4517e2ca97,d59307ecd67d2b197a8ca181bc816b2eca23e767..ee271bb661cc923dfa67ae5d5c45a18c71df7cb1
@@@ -693,6 -693,7 +693,7 @@@ static u64 sched_vslice(struct cfs_rq *
  
  static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
  static unsigned long task_h_load(struct task_struct *p);
+ static unsigned long capacity_of(int cpu);
  
  /* Give new sched_entity start runnable values to heavy its load in infant time */
  void init_entity_runnable_average(struct sched_entity *se)
@@@ -1392,17 -1393,6 +1393,17 @@@ bool should_numa_migrate_memory(struct 
        int last_cpupid, this_cpupid;
  
        this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
 +      last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 +
 +      /*
 +       * Allow first faults or private faults to migrate immediately early in
 +       * the lifetime of a task. The magic number 4 is based on waiting for
 +       * two full passes of the "multi-stage node selection" test that is
 +       * executed below.
 +       */
 +      if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
 +          (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
 +              return true;
  
        /*
         * Multi-stage node selection is used in conjunction with a periodic
         * This quadric squishes small probabilities, making it less likely we
         * act on an unlikely task<->page relation.
         */
 -      last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
        if (!cpupid_pid_unset(last_cpupid) &&
                                cpupid_to_nid(last_cpupid) != dst_nid)
                return false;
  static unsigned long weighted_cpuload(struct rq *rq);
  static unsigned long source_load(int cpu, int type);
  static unsigned long target_load(int cpu, int type);
- static unsigned long capacity_of(int cpu);
  
  /* Cached statistics for all CPUs within a node */
  struct numa_stats {
  
        /* Total compute capacity of CPUs on a node */
        unsigned long compute_capacity;
-       unsigned int nr_running;
  };
  
  /*
   */
  static void update_numa_stats(struct numa_stats *ns, int nid)
  {
-       int smt, cpu, cpus = 0;
-       unsigned long capacity;
+       int cpu;
  
        memset(ns, 0, sizeof(*ns));
        for_each_cpu(cpu, cpumask_of_node(nid)) {
                struct rq *rq = cpu_rq(cpu);
  
-               ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(rq);
                ns->compute_capacity += capacity_of(cpu);
-               cpus++;
        }
  
-       /*
-        * If we raced with hotplug and there are no CPUs left in our mask
-        * the @ns structure is NULL'ed and task_numa_compare() will
-        * not find this node attractive.
-        *
-        * We'll detect a huge imbalance and bail there.
-        */
-       if (!cpus)
-               return;
-       /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
-       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
-       capacity = cpus / smt; /* cores */
-       capacity = min_t(unsigned, capacity,
-               DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
  }
  
  struct task_numa_env {
@@@ -3723,6 -3691,29 +3701,29 @@@ util_est_dequeue(struct cfs_rq *cfs_rq
        WRITE_ONCE(p->se.avg.util_est, ue);
  }
  
+ static inline int task_fits_capacity(struct task_struct *p, long capacity)
+ {
+       return capacity * 1024 > task_util_est(p) * capacity_margin;
+ }
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ {
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return;
+       if (!p) {
+               rq->misfit_task_load = 0;
+               return;
+       }
+       if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
+               rq->misfit_task_load = 0;
+               return;
+       }
+       rq->misfit_task_load = task_h_load(p);
+ }
  #else /* CONFIG_SMP */
  
  #define UPDATE_TG     0x0
@@@ -3752,6 -3743,7 +3753,7 @@@ util_est_enqueue(struct cfs_rq *cfs_rq
  static inline void
  util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
                 bool task_sleep) {}
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
  
  #endif /* CONFIG_SMP */
  
@@@ -4001,7 -3993,7 +4003,7 @@@ dequeue_entity(struct cfs_rq *cfs_rq, s
         * put back on, and if we advance min_vruntime, we'll be placed back
         * further than we started -- ie. we'll be penalized.
         */
 -      if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
 +      if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
                update_min_vruntime(cfs_rq);
  }
  
@@@ -4476,13 -4468,9 +4478,13 @@@ static void throttle_cfs_rq(struct cfs_
  
        /*
         * Add to the _head_ of the list, so that an already-started
 -       * distribute_cfs_runtime will not see us
 +       * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
 +       * not running, add to the tail so that later runqueues don't get starved.
         */
 -      list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 +      if (cfs_b->distribute_running)
 +              list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 +      else
 +              list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
  
        /*
         * If we're the first throttled task, make sure the bandwidth
@@@ -4626,16 -4614,14 +4628,16 @@@ static int do_sched_cfs_period_timer(st
         * in us over-using our runtime if it is all used during this loop, but
         * only by limited amounts in that extreme case.
         */
 -      while (throttled && cfs_b->runtime > 0) {
 +      while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
                runtime = cfs_b->runtime;
 +              cfs_b->distribute_running = 1;
                raw_spin_unlock(&cfs_b->lock);
                /* we can't nest cfs_b->lock while distributing bandwidth */
                runtime = distribute_cfs_runtime(cfs_b, runtime,
                                                 runtime_expires);
                raw_spin_lock(&cfs_b->lock);
  
 +              cfs_b->distribute_running = 0;
                throttled = !list_empty(&cfs_b->throttled_cfs_rq);
  
                cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@@ -4746,11 -4732,6 +4748,11 @@@ static void do_sched_cfs_slack_timer(st
  
        /* confirm we're still not at a refresh boundary */
        raw_spin_lock(&cfs_b->lock);
 +      if (cfs_b->distribute_running) {
 +              raw_spin_unlock(&cfs_b->lock);
 +              return;
 +      }
 +
        if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
                raw_spin_unlock(&cfs_b->lock);
                return;
                runtime = cfs_b->runtime;
  
        expires = cfs_b->runtime_expires;
 +      if (runtime)
 +              cfs_b->distribute_running = 1;
 +
        raw_spin_unlock(&cfs_b->lock);
  
        if (!runtime)
        raw_spin_lock(&cfs_b->lock);
        if (expires == cfs_b->runtime_expires)
                cfs_b->runtime -= min(runtime, cfs_b->runtime);
 +      cfs_b->distribute_running = 0;
        raw_spin_unlock(&cfs_b->lock);
  }
  
@@@ -4882,7 -4859,6 +4884,7 @@@ void init_cfs_bandwidth(struct cfs_band
        cfs_b->period_timer.function = sched_cfs_period_timer;
        hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        cfs_b->slack_timer.function = sched_cfs_slack_timer;
 +      cfs_b->distribute_running = 0;
  }
  
  static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
@@@ -6280,6 -6256,9 +6282,9 @@@ static int wake_cap(struct task_struct 
  {
        long min_cap, max_cap;
  
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return 0;
        min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
        max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
  
        /* Bring task utilization in sync with prev_cpu */
        sync_entity_load_avg(&p->se);
  
-       return min_cap * 1024 < task_util(p) * capacity_margin;
+       return !task_fits_capacity(p, min_cap);
  }
  
  /*
@@@ -6709,9 -6688,12 +6714,12 @@@ done: __maybe_unused
        if (hrtick_enabled(rq))
                hrtick_start_fair(rq, p);
  
+       update_misfit_status(p, rq);
        return p;
  
  idle:
+       update_misfit_status(NULL, rq);
        new_tasks = idle_balance(rq, rf);
  
        /*
@@@ -6917,6 -6899,13 +6925,13 @@@ static unsigned long __read_mostly max_
  
  enum fbq_type { regular, remote, all };
  
+ enum group_type {
+       group_other = 0,
+       group_misfit_task,
+       group_imbalanced,
+       group_overloaded,
+ };
  #define LBF_ALL_PINNED        0x01
  #define LBF_NEED_BREAK        0x02
  #define LBF_DST_PINNED  0x04
@@@ -6947,6 -6936,7 +6962,7 @@@ struct lb_env 
        unsigned int            loop_max;
  
        enum fbq_type           fbq_type;
+       enum group_type         src_grp_type;
        struct list_head        tasks;
  };
  
@@@ -7327,7 -7317,7 +7343,7 @@@ static inline bool others_have_blocked(
        if (READ_ONCE(rq->avg_dl.util_avg))
                return true;
  
- #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        if (READ_ONCE(rq->avg_irq.util_avg))
                return true;
  #endif
@@@ -7490,12 -7480,6 +7506,6 @@@ static unsigned long task_h_load(struc
  
  /********** Helpers for find_busiest_group ************************/
  
- enum group_type {
-       group_other = 0,
-       group_imbalanced,
-       group_overloaded,
- };
  /*
   * sg_lb_stats - stats of a sched_group required for load_balancing
   */
@@@ -7511,6 -7495,7 +7521,7 @@@ struct sg_lb_stats 
        unsigned int group_weight;
        enum group_type group_type;
        int group_no_capacity;
+       unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
  #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
@@@ -7619,13 -7604,14 +7630,14 @@@ static void update_cpu_capacity(struct 
        cpu_rq(cpu)->cpu_capacity = capacity;
        sdg->sgc->capacity = capacity;
        sdg->sgc->min_capacity = capacity;
+       sdg->sgc->max_capacity = capacity;
  }
  
  void update_group_capacity(struct sched_domain *sd, int cpu)
  {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long capacity, min_capacity;
+       unsigned long capacity, min_capacity, max_capacity;
        unsigned long interval;
  
        interval = msecs_to_jiffies(sd->balance_interval);
  
        capacity = 0;
        min_capacity = ULONG_MAX;
+       max_capacity = 0;
  
        if (child->flags & SD_OVERLAP) {
                /*
                        }
  
                        min_capacity = min(capacity, min_capacity);
+                       max_capacity = max(capacity, max_capacity);
                }
        } else  {
                /*
  
                        capacity += sgc->capacity;
                        min_capacity = min(sgc->min_capacity, min_capacity);
+                       max_capacity = max(sgc->max_capacity, max_capacity);
                        group = group->next;
                } while (group != child->groups);
        }
  
        sdg->sgc->capacity = capacity;
        sdg->sgc->min_capacity = min_capacity;
+       sdg->sgc->max_capacity = max_capacity;
  }
  
  /*
@@@ -7783,16 -7773,27 +7799,27 @@@ group_is_overloaded(struct lb_env *env
  }
  
  /*
-  * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+  * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
   * per-CPU capacity than sched_group ref.
   */
  static inline bool
- group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+ group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
  {
        return sg->sgc->min_capacity * capacity_margin <
                                                ref->sgc->min_capacity * 1024;
  }
  
+ /*
+  * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
+  * per-CPU capacity_orig than sched_group ref.
+  */
+ static inline bool
+ group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+ {
+       return sg->sgc->max_capacity * capacity_margin <
+                                               ref->sgc->max_capacity * 1024;
+ }
  static inline enum
  group_type group_classify(struct sched_group *group,
                          struct sg_lb_stats *sgs)
        if (sg_imbalanced(group))
                return group_imbalanced;
  
+       if (sgs->group_misfit_task_load)
+               return group_misfit_task;
        return group_other;
  }
  
@@@ -7835,7 -7839,7 +7865,7 @@@ static bool update_nohz_stats(struct r
   * @load_idx: Load index of sched_domain of this_cpu for load calc.
   * @local_group: Does group contain this_cpu.
   * @sgs: variable to hold the statistics for this group.
-  * @overload: Indicate more than one runnable task for any CPU.
+  * @overload: Indicate pullable load (e.g. >1 runnable task).
   */
  static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
                 */
                if (!nr_running && idle_cpu(i))
                        sgs->idle_cpus++;
+               if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+                   sgs->group_misfit_task_load < rq->misfit_task_load) {
+                       sgs->group_misfit_task_load = rq->misfit_task_load;
+                       *overload = 1;
+               }
        }
  
        /* Adjust by relative CPU capacity of the group */
@@@ -7912,6 -7922,17 +7948,17 @@@ static bool update_sd_pick_busiest(stru
  {
        struct sg_lb_stats *busiest = &sds->busiest_stat;
  
+       /*
+        * Don't try to pull misfit tasks we can't help.
+        * We can use max_capacity here as reduction in capacity on some
+        * CPUs in the group should either be possible to resolve
+        * internally or be covered by avg_load imbalance (eventually).
+        */
+       if (sgs->group_type == group_misfit_task &&
+           (!group_smaller_max_cpu_capacity(sg, sds->local) ||
+            !group_has_capacity(env, &sds->local_stat)))
+               return false;
        if (sgs->group_type > busiest->group_type)
                return true;
  
         * power/energy consequences are not considered.
         */
        if (sgs->sum_nr_running <= sgs->group_weight &&
-           group_smaller_cpu_capacity(sds->local, sg))
+           group_smaller_min_cpu_capacity(sds->local, sg))
+               return false;
+       /*
+        * If we have more than one misfit sg go with the biggest misfit.
+        */
+       if (sgs->group_type == group_misfit_task &&
+           sgs->group_misfit_task_load < busiest->group_misfit_task_load)
                return false;
  
  asym_packing:
@@@ -8002,11 -8030,9 +8056,9 @@@ static inline void update_sd_lb_stats(s
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats *local = &sds->local_stat;
        struct sg_lb_stats tmp_sgs;
-       int load_idx, prefer_sibling = 0;
+       int load_idx;
        bool overload = false;
-       if (child && child->flags & SD_PREFER_SIBLING)
-               prefer_sibling = 1;
+       bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
  
  #ifdef CONFIG_NO_HZ_COMMON
        if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
@@@ -8080,8 -8106,8 +8132,8 @@@ next_group
  
        if (!env->sd->parent) {
                /* update overload indicator if we are at root domain */
-               if (env->dst_rq->rd->overload != overload)
-                       env->dst_rq->rd->overload = overload;
+               if (READ_ONCE(env->dst_rq->rd->overload) != overload)
+                       WRITE_ONCE(env->dst_rq->rd->overload, overload);
        }
  }
  
@@@ -8231,8 -8257,9 +8283,9 @@@ static inline void calculate_imbalance(
         * factors in sg capacity and sgs with smaller group_type are
         * skipped when updating the busiest sg:
         */
-       if (busiest->avg_load <= sds->avg_load ||
-           local->avg_load >= sds->avg_load) {
+       if (busiest->group_type != group_misfit_task &&
+           (busiest->avg_load <= sds->avg_load ||
+            local->avg_load >= sds->avg_load)) {
                env->imbalance = 0;
                return fix_small_imbalance(env, sds);
        }
                (sds->avg_load - local->avg_load) * local->group_capacity
        ) / SCHED_CAPACITY_SCALE;
  
+       /* Boost imbalance to allow misfit task to be balanced. */
+       if (busiest->group_type == group_misfit_task) {
+               env->imbalance = max_t(long, env->imbalance,
+                                      busiest->group_misfit_task_load);
+       }
        /*
         * if *imbalance is less than the average load per runnable task
         * there is no guarantee that any tasks will be moved so we'll have
@@@ -8332,6 -8365,10 +8391,10 @@@ static struct sched_group *find_busiest
            busiest->group_no_capacity)
                goto force_balance;
  
+       /* Misfit tasks should be dealt with regardless of the avg load */
+       if (busiest->group_type == group_misfit_task)
+               goto force_balance;
        /*
         * If the local group is busier than the selected busiest group
         * don't try and pull any tasks.
  
  force_balance:
        /* Looks like there is an imbalance. Compute it */
+       env->src_grp_type = busiest->group_type;
        calculate_imbalance(env, &sds);
        return env->imbalance ? sds.busiest : NULL;
  
@@@ -8416,8 -8454,32 +8480,32 @@@ static struct rq *find_busiest_queue(st
                if (rt > env->fbq_type)
                        continue;
  
+               /*
+                * For ASYM_CPUCAPACITY domains with misfit tasks we simply
+                * seek the "biggest" misfit task.
+                */
+               if (env->src_grp_type == group_misfit_task) {
+                       if (rq->misfit_task_load > busiest_load) {
+                               busiest_load = rq->misfit_task_load;
+                               busiest = rq;
+                       }
+                       continue;
+               }
                capacity = capacity_of(i);
  
+               /*
+                * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
+                * eventually lead to active_balancing high->low capacity.
+                * Higher per-CPU capacity is considered better than balancing
+                * average load.
+                */
+               if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+                   capacity_of(env->dst_cpu) < capacity &&
+                   rq->nr_running == 1)
+                       continue;
                wl = weighted_cpuload(rq);
  
                /*
@@@ -8485,6 -8547,9 +8573,9 @@@ static int need_active_balance(struct l
                        return 1;
        }
  
+       if (env->src_grp_type == group_misfit_task)
+               return 1;
        return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
  }
  
@@@ -9127,7 -9192,7 +9218,7 @@@ static void nohz_balancer_kick(struct r
        if (time_before(now, nohz.next_balance))
                goto out;
  
-       if (rq->nr_running >= 2) {
+       if (rq->nr_running >= 2 || rq->misfit_task_load) {
                flags = NOHZ_KICK_MASK;
                goto out;
        }
@@@ -9496,7 -9561,7 +9587,7 @@@ static int idle_balance(struct rq *this
        rq_unpin_lock(this_rq, rf);
  
        if (this_rq->avg_idle < sysctl_sched_migration_cost ||
-           !this_rq->rd->overload) {
+           !READ_ONCE(this_rq->rd->overload)) {
  
                rcu_read_lock();
                sd = rcu_dereference_check_sched_domain(this_rq->sd);
@@@ -9658,6 -9723,8 +9749,8 @@@ static void task_tick_fair(struct rq *r
  
        if (static_branch_unlikely(&sched_numa_balancing))
                task_tick_numa(rq, curr);
+       update_misfit_status(curr, rq);
  }
  
  /*
diff --combined kernel/sched/sched.h
index 9683f458aec72823dfed413b5dc0a262836aaf85,798b1afd50925fdf43943c5c4ad6358a1ca11680..fc24f2b8c6469bfc947d4a29a0de5e0284870e47
@@@ -346,8 -346,6 +346,8 @@@ struct cfs_bandwidth 
        int                     nr_periods;
        int                     nr_throttled;
        u64                     throttled_time;
 +
 +      bool                    distribute_running;
  #endif
  };
  
@@@ -717,8 -715,12 +717,12 @@@ struct root_domain 
        cpumask_var_t           span;
        cpumask_var_t           online;
  
-       /* Indicate more than one runnable task for any CPU */
-       bool                    overload;
+       /*
+        * Indicate pullable load on at least one CPU, e.g:
+        * - More than one runnable task
+        * - Running task is misfit
+        */
+       int                     overload;
  
        /*
         * The bit corresponding to a CPU gets set here if such CPU has more
@@@ -845,6 -847,8 +849,8 @@@ struct rq 
  
        unsigned char           idle_balance;
  
+       unsigned long           misfit_task_load;
        /* For active balancing */
        int                     active_balance;
        int                     push_cpu;
  
        struct sched_avg        avg_rt;
        struct sched_avg        avg_dl;
- #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
- #define HAVE_SCHED_AVG_IRQ
+ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        struct sched_avg        avg_irq;
  #endif
        u64                     idle_stamp;
@@@ -1188,6 -1191,7 +1193,7 @@@ DECLARE_PER_CPU(int, sd_llc_id)
  DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
  DECLARE_PER_CPU(struct sched_domain *, sd_numa);
  DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+ extern struct static_key_false sched_asym_cpucapacity;
  
  struct sched_group_capacity {
        atomic_t                ref;
         */
        unsigned long           capacity;
        unsigned long           min_capacity;           /* Min per-CPU capacity in group */
+       unsigned long           max_capacity;           /* Max per-CPU capacity in group */
        unsigned long           next_update;
        int                     imbalance;              /* XXX unrelated to capacity but shared group state */
  
@@@ -1396,7 -1401,7 +1403,7 @@@ static const_debug __maybe_unused unsig
        0;
  #undef SCHED_FEAT
  
- #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+ #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
  
  #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
  
@@@ -1696,8 -1701,8 +1703,8 @@@ static inline void add_nr_running(struc
  
        if (prev_nr < 2 && rq->nr_running >= 2) {
  #ifdef CONFIG_SMP
-               if (!rq->rd->overload)
-                       rq->rd->overload = true;
+               if (!READ_ONCE(rq->rd->overload))
+                       WRITE_ONCE(rq->rd->overload, 1);
  #endif
        }
  
@@@ -2217,7 -2222,7 +2224,7 @@@ static inline unsigned long cpu_util_rt
  }
  #endif
  
- #ifdef HAVE_SCHED_AVG_IRQ
+ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
  static inline unsigned long cpu_util_irq(struct rq *rq)
  {
        return rq->avg_irq.util_avg;