Merge branches 'pm-clk', 'pm-domains' and 'powercap'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 18 Jun 2015 23:18:30 +0000 (01:18 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 18 Jun 2015 23:18:30 +0000 (01:18 +0200)
* pm-clk:
  PM / clk: Print acquired clock name in addition to con_id
  PM / clk: Fix clock error check in __pm_clk_add()
  drivers: sh: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  arm: davinci: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  arm: omap1: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  arm: keystone: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  PM / clock_ops: Provide default runtime ops to users

* pm-domains:
  PM / Domains: Skip timings during syscore suspend/resume

* powercap:
  powercap / RAPL: Support Knights Landing
  powercap / RAPL: Floor frequency setting in Atom SoC

arch/arm/mach-davinci/pm_domain.c
arch/arm/mach-keystone/pm_domain.c
arch/arm/mach-omap1/pm_bus.c
drivers/base/power/clock_ops.c
drivers/base/power/domain.c
drivers/powercap/intel_rapl.c
drivers/sh/pm_runtime.c
include/linux/pm_clock.h

index 641edc31393850dcb134f6c3aba2dd9924787aff..78eac2c0c146bb91a7b51965e2685e3106f67eef 100644 (file)
 #include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 
-#ifdef CONFIG_PM
-static int davinci_pm_runtime_suspend(struct device *dev)
-{
-       int ret;
-
-       dev_dbg(dev, "%s\n", __func__);
-
-       ret = pm_generic_runtime_suspend(dev);
-       if (ret)
-               return ret;
-
-       ret = pm_clk_suspend(dev);
-       if (ret) {
-               pm_generic_runtime_resume(dev);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int davinci_pm_runtime_resume(struct device *dev)
-{
-       dev_dbg(dev, "%s\n", __func__);
-
-       pm_clk_resume(dev);
-       return pm_generic_runtime_resume(dev);
-}
-#endif
-
 static struct dev_pm_domain davinci_pm_domain = {
        .ops = {
-               SET_RUNTIME_PM_OPS(davinci_pm_runtime_suspend,
-                                  davinci_pm_runtime_resume, NULL)
+               USE_PM_CLK_RUNTIME_OPS
                USE_PLATFORM_PM_SLEEP_OPS
        },
 };
index 41bebfd296dcbacac7a9d0c60747d5b1c01c73d4..edea697e8253d601ce2323a0a99c1083d1e9ff93 100644 (file)
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 
-#ifdef CONFIG_PM
-static int keystone_pm_runtime_suspend(struct device *dev)
-{
-       int ret;
-
-       dev_dbg(dev, "%s\n", __func__);
-
-       ret = pm_generic_runtime_suspend(dev);
-       if (ret)
-               return ret;
-
-       ret = pm_clk_suspend(dev);
-       if (ret) {
-               pm_generic_runtime_resume(dev);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int keystone_pm_runtime_resume(struct device *dev)
-{
-       dev_dbg(dev, "%s\n", __func__);
-
-       pm_clk_resume(dev);
-
-       return pm_generic_runtime_resume(dev);
-}
-#endif
-
 static struct dev_pm_domain keystone_pm_domain = {
        .ops = {
-               SET_RUNTIME_PM_OPS(keystone_pm_runtime_suspend,
-                                  keystone_pm_runtime_resume, NULL)
+               USE_PM_CLK_RUNTIME_OPS
                USE_PLATFORM_PM_SLEEP_OPS
        },
 };
index c40e209de65c0fe6816a4dc31bc737335d8b71ca..667c1637ff9198b5aa539c10b6ae83352ad3afda 100644 (file)
 
 #include "soc.h"
 
-#ifdef CONFIG_PM
-static int omap1_pm_runtime_suspend(struct device *dev)
-{
-       int ret;
-
-       dev_dbg(dev, "%s\n", __func__);
-
-       ret = pm_generic_runtime_suspend(dev);
-       if (ret)
-               return ret;
-
-       ret = pm_clk_suspend(dev);
-       if (ret) {
-               pm_generic_runtime_resume(dev);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int omap1_pm_runtime_resume(struct device *dev)
-{
-       dev_dbg(dev, "%s\n", __func__);
-
-       pm_clk_resume(dev);
-       return pm_generic_runtime_resume(dev);
-}
-
 static struct dev_pm_domain default_pm_domain = {
        .ops = {
-               .runtime_suspend = omap1_pm_runtime_suspend,
-               .runtime_resume = omap1_pm_runtime_resume,
+               USE_PM_CLK_RUNTIME_OPS
                USE_PLATFORM_PM_SLEEP_OPS
        },
 };
-#define OMAP1_PM_DOMAIN (&default_pm_domain)
-#else
-#define OMAP1_PM_DOMAIN NULL
-#endif /* CONFIG_PM */
 
 static struct pm_clk_notifier_block platform_bus_notifier = {
-       .pm_domain = OMAP1_PM_DOMAIN,
+       .pm_domain = &default_pm_domain,
        .con_ids = { "ick", "fck", NULL, },
 };
 
index 7fdd0172605afe1be1f74fa90593932a97153787..acef9f9f759a2530629d4a43fd5553290c6d6c54 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/clkdev.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/pm_runtime.h>
 
 #ifdef CONFIG_PM
 
@@ -67,7 +68,8 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
        } else {
                clk_prepare(ce->clk);
                ce->status = PCE_STATUS_ACQUIRED;
-               dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+               dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+                       ce->clk, ce->con_id);
        }
 }
 
@@ -93,7 +95,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
                        return -ENOMEM;
                }
        } else {
-               if (IS_ERR(ce->clk) || !__clk_get(clk)) {
+               if (IS_ERR(clk) || !__clk_get(clk)) {
                        kfree(ce);
                        return -ENOENT;
                }
@@ -367,6 +369,43 @@ static int pm_clk_notify(struct notifier_block *nb,
        return 0;
 }
 
+int pm_clk_runtime_suspend(struct device *dev)
+{
+       int ret;
+
+       dev_dbg(dev, "%s\n", __func__);
+
+       ret = pm_generic_runtime_suspend(dev);
+       if (ret) {
+               dev_err(dev, "failed to suspend device\n");
+               return ret;
+       }
+
+       ret = pm_clk_suspend(dev);
+       if (ret) {
+               dev_err(dev, "failed to suspend clock\n");
+               pm_generic_runtime_resume(dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+int pm_clk_runtime_resume(struct device *dev)
+{
+       int ret;
+
+       dev_dbg(dev, "%s\n", __func__);
+
+       ret = pm_clk_resume(dev);
+       if (ret) {
+               dev_err(dev, "failed to resume clock\n");
+               return ret;
+       }
+
+       return pm_generic_runtime_resume(dev);
+}
+
 #else /* !CONFIG_PM */
 
 /**
index 2327613d453929db41e605df6614bc94a9b91067..cdd547bd67df82184dd201b4e1cf9626cf90dd2e 100644 (file)
@@ -181,7 +181,7 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
        genpd->cpuidle_data->idle_state->exit_latency = usecs64;
 }
 
-static int genpd_power_on(struct generic_pm_domain *genpd)
+static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
        ktime_t time_start;
        s64 elapsed_ns;
@@ -190,6 +190,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
        if (!genpd->power_on)
                return 0;
 
+       if (!timed)
+               return genpd->power_on(genpd);
+
        time_start = ktime_get();
        ret = genpd->power_on(genpd);
        if (ret)
@@ -208,7 +211,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
        return ret;
 }
 
-static int genpd_power_off(struct generic_pm_domain *genpd)
+static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 {
        ktime_t time_start;
        s64 elapsed_ns;
@@ -217,6 +220,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd)
        if (!genpd->power_off)
                return 0;
 
+       if (!timed)
+               return genpd->power_off(genpd);
+
        time_start = ktime_get();
        ret = genpd->power_off(genpd);
        if (ret == -EBUSY)
@@ -305,7 +311,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
                }
        }
 
-       ret = genpd_power_on(genpd);
+       ret = genpd_power_on(genpd, true);
        if (ret)
                goto err;
 
@@ -615,7 +621,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
                 * the pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
-               ret = genpd_power_off(genpd);
+               ret = genpd_power_off(genpd, true);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
@@ -827,6 +833,7 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
+ * @timed: True if latency measurements are allowed.
  *
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so.  Also, in that case propagate to its masters.
@@ -836,7 +843,8 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
  * executed sequentially, so it is guaranteed that it will never run twice in
  * parallel).
  */
-static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
+                                  bool timed)
 {
        struct gpd_link *link;
 
@@ -847,26 +855,28 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
            || atomic_read(&genpd->sd_count) > 0)
                return;
 
-       genpd_power_off(genpd);
+       genpd_power_off(genpd, timed);
 
        genpd->status = GPD_STATE_POWER_OFF;
 
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
-               pm_genpd_sync_poweroff(link->master);
+               pm_genpd_sync_poweroff(link->master, timed);
        }
 }
 
 /**
  * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
  * @genpd: PM domain to power on.
+ * @timed: True if latency measurements are allowed.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
  * transitions, so it need not acquire locks (all of the "noirq" callbacks are
  * executed sequentially, so it is guaranteed that it will never run twice in
  * parallel).
  */
-static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
+                                 bool timed)
 {
        struct gpd_link *link;
 
@@ -874,11 +884,11 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
                return;
 
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               pm_genpd_sync_poweron(link->master);
+               pm_genpd_sync_poweron(link->master, timed);
                genpd_sd_counter_inc(link->master);
        }
 
-       genpd_power_on(genpd);
+       genpd_power_on(genpd, timed);
 
        genpd->status = GPD_STATE_ACTIVE;
 }
@@ -1056,7 +1066,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
-       pm_genpd_sync_poweroff(genpd);
+       pm_genpd_sync_poweroff(genpd, true);
 
        return 0;
 }
@@ -1086,7 +1096,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
-       pm_genpd_sync_poweron(genpd);
+       pm_genpd_sync_poweron(genpd, true);
        genpd->suspended_count--;
 
        return genpd_start_dev(genpd, dev);
@@ -1300,7 +1310,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
-                       genpd_power_off(genpd);
+                       genpd_power_off(genpd, true);
 
                        return 0;
                }
@@ -1309,7 +1319,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
        if (genpd->suspend_power_off)
                return 0;
 
-       pm_genpd_sync_poweron(genpd);
+       pm_genpd_sync_poweron(genpd, true);
 
        return genpd_start_dev(genpd, dev);
 }
@@ -1367,9 +1377,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
 
        if (suspend) {
                genpd->suspended_count++;
-               pm_genpd_sync_poweroff(genpd);
+               pm_genpd_sync_poweroff(genpd, false);
        } else {
-               pm_genpd_sync_poweron(genpd);
+               pm_genpd_sync_poweron(genpd, false);
                genpd->suspended_count--;
        }
 }
index fd243231620ac10554b0b2616792c00e189d970d..482b22ddc7b2cc424da112aafa6b17065e75d927 100644 (file)
@@ -187,6 +187,7 @@ struct rapl_package {
 };
 
 struct rapl_defaults {
+       u8 floor_freq_reg_addr;
        int (*check_unit)(struct rapl_package *rp, int cpu);
        void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
        u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
@@ -196,7 +197,8 @@ struct rapl_defaults {
 static struct rapl_defaults *rapl_defaults;
 
 /* Sideband MBI registers */
-#define IOSF_CPU_POWER_BUDGET_CTL (0x2)
+#define IOSF_CPU_POWER_BUDGET_CTL_BYT (0x2)
+#define IOSF_CPU_POWER_BUDGET_CTL_TNG (0xdf)
 
 #define PACKAGE_PLN_INT_SAVED   BIT(0)
 #define MAX_PRIM_NAME (32)
@@ -358,7 +360,8 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
 
        get_online_cpus();
        rapl_write_data_raw(rd, PL1_ENABLE, mode);
-       rapl_defaults->set_floor_freq(rd, mode);
+       if (rapl_defaults->set_floor_freq)
+               rapl_defaults->set_floor_freq(rd, mode);
        put_online_cpus();
 
        return 0;
@@ -979,16 +982,22 @@ static void set_floor_freq_atom(struct rapl_domain *rd, bool enable)
        static u32 power_ctrl_orig_val;
        u32 mdata;
 
+       if (!rapl_defaults->floor_freq_reg_addr) {
+               pr_err("Invalid floor frequency config register\n");
+               return;
+       }
+
        if (!power_ctrl_orig_val)
                iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ,
-                       IOSF_CPU_POWER_BUDGET_CTL, &power_ctrl_orig_val);
+                       rapl_defaults->floor_freq_reg_addr,
+                               &power_ctrl_orig_val);
        mdata = power_ctrl_orig_val;
        if (enable) {
                mdata &= ~(0x7f << 8);
                mdata |= 1 << 8;
        }
        iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE,
-               IOSF_CPU_POWER_BUDGET_CTL, mdata);
+               rapl_defaults->floor_freq_reg_addr, mdata);
 }
 
 static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
@@ -1029,6 +1038,7 @@ static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value,
 }
 
 static const struct rapl_defaults rapl_defaults_core = {
+       .floor_freq_reg_addr = 0,
        .check_unit = rapl_check_unit_core,
        .set_floor_freq = set_floor_freq_default,
        .compute_time_window = rapl_compute_time_window_core,
@@ -1041,12 +1051,34 @@ static const struct rapl_defaults rapl_defaults_hsw_server = {
        .dram_domain_energy_unit = 15300,
 };
 
-static const struct rapl_defaults rapl_defaults_atom = {
+static const struct rapl_defaults rapl_defaults_byt = {
+       .floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT,
+       .check_unit = rapl_check_unit_atom,
+       .set_floor_freq = set_floor_freq_atom,
+       .compute_time_window = rapl_compute_time_window_atom,
+};
+
+static const struct rapl_defaults rapl_defaults_tng = {
+       .floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_TNG,
        .check_unit = rapl_check_unit_atom,
        .set_floor_freq = set_floor_freq_atom,
        .compute_time_window = rapl_compute_time_window_atom,
 };
 
+static const struct rapl_defaults rapl_defaults_ann = {
+       .floor_freq_reg_addr = 0,
+       .check_unit = rapl_check_unit_atom,
+       .set_floor_freq = NULL,
+       .compute_time_window = rapl_compute_time_window_atom,
+};
+
+static const struct rapl_defaults rapl_defaults_cht = {
+       .floor_freq_reg_addr = 0,
+       .check_unit = rapl_check_unit_atom,
+       .set_floor_freq = NULL,
+       .compute_time_window = rapl_compute_time_window_atom,
+};
+
 #define RAPL_CPU(_model, _ops) {                       \
                .vendor = X86_VENDOR_INTEL,             \
                .family = 6,                            \
@@ -1057,7 +1089,7 @@ static const struct rapl_defaults rapl_defaults_atom = {
 static const struct x86_cpu_id rapl_ids[] __initconst = {
        RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
        RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
-       RAPL_CPU(0x37, rapl_defaults_atom),/* Valleyview */
+       RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */
        RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
        RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
        RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
@@ -1065,10 +1097,11 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
        RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
        RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
        RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
-       RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
-       RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+       RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
+       RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
        RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
-       RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
+       RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
+       RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
index fe8875f0d7be1155883655150ec57c3115356bbe..d3d1891cda3cf9a891b1714e240e73036109f2ac 100644 (file)
 #include <linux/bitmap.h>
 #include <linux/slab.h>
 
-#ifdef CONFIG_PM
-static int sh_pm_runtime_suspend(struct device *dev)
-{
-       int ret;
-
-       ret = pm_generic_runtime_suspend(dev);
-       if (ret) {
-               dev_err(dev, "failed to suspend device\n");
-               return ret;
-       }
-
-       ret = pm_clk_suspend(dev);
-       if (ret) {
-               dev_err(dev, "failed to suspend clock\n");
-               pm_generic_runtime_resume(dev);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int sh_pm_runtime_resume(struct device *dev)
-{
-       int ret;
-
-       ret = pm_clk_resume(dev);
-       if (ret) {
-               dev_err(dev, "failed to resume clock\n");
-               return ret;
-       }
-
-       return pm_generic_runtime_resume(dev);
-}
-
 static struct dev_pm_domain default_pm_domain = {
        .ops = {
-               .runtime_suspend = sh_pm_runtime_suspend,
-               .runtime_resume = sh_pm_runtime_resume,
+               USE_PM_CLK_RUNTIME_OPS
                USE_PLATFORM_PM_SLEEP_OPS
        },
 };
 
-#define DEFAULT_PM_DOMAIN_PTR  (&default_pm_domain)
-
-#else
-
-#define DEFAULT_PM_DOMAIN_PTR  NULL
-
-#endif /* CONFIG_PM */
-
 static struct pm_clk_notifier_block platform_bus_notifier = {
-       .pm_domain = DEFAULT_PM_DOMAIN_PTR,
+       .pm_domain = &default_pm_domain,
        .con_ids = { NULL, },
 };
 
index 0b0039634410c158a22c3667b142856dfa4a66a1..25266c600021462c8f809a22451231820caf1b6e 100644 (file)
@@ -20,6 +20,16 @@ struct pm_clk_notifier_block {
 
 struct clk;
 
+#ifdef CONFIG_PM
+extern int pm_clk_runtime_suspend(struct device *dev);
+extern int pm_clk_runtime_resume(struct device *dev);
+#define USE_PM_CLK_RUNTIME_OPS \
+       .runtime_suspend = pm_clk_runtime_suspend, \
+       .runtime_resume = pm_clk_runtime_resume,
+#else
+#define USE_PM_CLK_RUNTIME_OPS
+#endif
+
 #ifdef CONFIG_PM_CLK
 static inline bool pm_clk_no_clocks(struct device *dev)
 {