sched/pelt: Fix warning and clean up IRQ PELT config
author		Vincent Guittot <vincent.guittot@linaro.org>
		Tue, 25 Sep 2018 09:17:42 +0000 (11:17 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Tue, 2 Oct 2018 07:45:00 +0000 (09:45 +0200)
Create a config option for enabling irq load tracking in the scheduler.
irq load tracking is useful only when irq or paravirtual time is
accounted, but it is only possible with SMP for now.
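
For reference, a minimal sketch (not part of the patch) of how a promptless
def_bool helper like this is consumed from C code: the symbol is enabled
automatically whenever its dependencies are met, so the code only needs the
generated CONFIG_ macro rather than a hand-maintained #define:

  #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
  	/* the irq PELT signal exists only in this configuration */
  	update_irq_load_avg(rq, irq_delta + steal);
  #endif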

Also use __maybe_unused to remove the compilation warning in
update_rq_clock_task() that was introduced by:

  2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
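
As a minimal standalone illustration (plain C, not kernel code; the macro
definition and the MY_IRQ_TIME_ACCOUNTING option are stand-ins), this is the
class of warning that __maybe_unused silences when the conditional code is
compiled out:

  /* build with: gcc -Wall -Wextra -c example.c */
  #define __maybe_unused __attribute__((unused))

  long long irq_time_read(void) { return 0; }	/* hypothetical stand-in */

  void update_clock_task(long long *clock_task, long long delta)
  {
  	long long __maybe_unused irq_delta = 0;

  #ifdef MY_IRQ_TIME_ACCOUNTING	/* stand-in for CONFIG_IRQ_TIME_ACCOUNTING */
  	irq_delta = irq_time_read();
  	delta -= irq_delta;
  #endif
  	/* Without the annotation, a build with the option disabled warns
  	 * that 'irq_delta' is unused. */
  	*clock_task += delta;
  }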

Suggested-by: Ingo Molnar <mingo@redhat.com>
Reported-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Reported-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: dou_liyang@163.com
Fixes: 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
Link: http://lkml.kernel.org/r/1537867062-27285-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
init/Kconfig
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/pelt.c
kernel/sched/pelt.h
kernel/sched/sched.h

index 1e234e2f1cba7a0f1ca9e2ae18868d50e1fe4aaf..317d5ccb51911c8cdb30f28f3ba71ad6e42a9e65 100644 (file)
@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
 
          If in doubt, say N here.
 
+config HAVE_SCHED_AVG_IRQ
+       def_bool y
+       depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
+       depends on SMP
+
 config BSD_PROCESS_ACCT
        bool "BSD Process Accounting"
        depends on MULTIUSER
index ad97f3ba5ec51c4a9379228b60416c72e6dc5b60..f2caf1bae4a3989d27ba8f934f04a8adf8f4df32 100644 (file)
@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
  * In theory, the compile should just see 0 here, and optimize out the call
  * to sched_rt_avg_update. But I don't trust it...
  */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       s64 steal = 0, irq_delta = 0;
-#endif
+       s64 __maybe_unused steal = 0, irq_delta = 0;
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 
@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
        rq->clock_task += delta;
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
                update_irq_load_avg(rq, irq_delta + steal);
 #endif
index 1d92ed2eca8b1deb587652c206d9345810cd4e23..d59307ecd67d2b197a8ca181bc816b2eca23e767 100644 (file)
@@ -7317,7 +7317,7 @@ static inline bool others_have_blocked(struct rq *rq)
        if (READ_ONCE(rq->avg_dl.util_avg))
                return true;
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        if (READ_ONCE(rq->avg_irq.util_avg))
                return true;
 #endif
index 35475c0c5419c4c16f98948a0d17dd6d8b100132..48a1264864355fa1d64bafc7e8071535847d42f9 100644 (file)
@@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
        return 0;
 }
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
  * irq:
  *
index d2894db28955bf682b5d7300be35404de284c8ae..7e56b489ff324a45157321761e5b360a6f26ff92 100644 (file)
@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
 #else
 static inline int
index 632804fa0b125e83fd62e9e4056386c052f697ac..798b1afd50925fdf43943c5c4ad6358a1ca11680 100644 (file)
@@ -862,8 +862,7 @@ struct rq {
 
        struct sched_avg        avg_rt;
        struct sched_avg        avg_dl;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-#define HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        struct sched_avg        avg_irq;
 #endif
        u64                     idle_stamp;
@@ -2223,7 +2222,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 }
 #endif
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
        return rq->avg_irq.util_avg;