-rw-r--r--  MAINTAINERS                                  1
-rw-r--r--  arch/arm/boot/dts/ste-dbx5x0.dtsi            5
-rw-r--r--  drivers/cpufreq/Kconfig.arm                  6
-rw-r--r--  drivers/cpufreq/Makefile                     1
-rw-r--r--  drivers/cpufreq/dbx500-cpufreq.c            20
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c             17
-rw-r--r--  drivers/cpufreq/intel_pstate.c             910
-rw-r--r--  drivers/cpufreq/mt8173-cpufreq.c            23
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c             24
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c         275
-rw-r--r--  drivers/thermal/Kconfig                     12
-rw-r--r--  drivers/thermal/Makefile                     1
-rw-r--r--  drivers/thermal/db8500_cpufreq_cooling.c   105
-rw-r--r--  include/linux/cpufreq.h                      7
-rw-r--r--  include/linux/tick.h                         1
-rw-r--r--  kernel/sched/cpufreq_schedutil.c            82
-rw-r--r--  kernel/time/tick-sched.c                    12
17 files changed, 840 insertions, 662 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 38d3e4ed7208..56a92ec7d3f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3463,6 +3463,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
B: https://bugzilla.kernel.org
F: Documentation/cpu-freq/
+F: Documentation/devicetree/bindings/cpufreq/
F: drivers/cpufreq/
F: include/linux/cpufreq.h
F: tools/testing/selftests/cpufreq/
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 162e1eb5373d..6c5affe2d0f5 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -1189,11 +1189,6 @@
status = "disabled";
};
- cpufreq-cooling {
- compatible = "stericsson,db8500-cpufreq-cooling";
- status = "disabled";
- };
-
mcde@a0350000 {
compatible = "stericsson,mcde";
reg = <0xa0350000 0x1000>, /* MCDE */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 74fa5c5904d3..74ed7e9a7f27 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -247,6 +247,12 @@ config ARM_TEGRA124_CPUFREQ
help
This adds the CPUFreq driver support for Tegra124 SOCs.
+config ARM_TEGRA186_CPUFREQ
+ tristate "Tegra186 CPUFreq support"
+ depends on ARCH_TEGRA && TEGRA_BPMP
+ help
+ This adds the CPUFreq driver support for Tegra186 SOCs.
+
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9f5a8045f36d..b7e78f063c4f 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 5c3ec1dd4921..3575b82210ba 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -18,6 +19,7 @@
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
+static struct thermal_cooling_device *cdev;
static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
@@ -32,6 +34,22 @@ static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}
+static int dbx500_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ if (!IS_ERR(cdev))
+ cpufreq_cooling_unregister(cdev);
+ return 0;
+}
+
+static void dbx500_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ cdev = cpufreq_cooling_register(policy->cpus);
+ if (IS_ERR(cdev))
+ pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev));
+ else
+ pr_info("Cooling device registered: %s\n", cdev->type);
+}
+
static struct cpufreq_driver dbx500_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
@@ -39,6 +57,8 @@ static struct cpufreq_driver dbx500_cpufreq_driver = {
.target_index = dbx500_cpufreq_target,
.get = cpufreq_generic_get,
.init = dbx500_cpufreq_init,
+ .exit = dbx500_cpufreq_exit,
+ .ready = dbx500_cpufreq_ready,
.name = "DBX500",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 7719b02e04f5..9c13f097fd8c 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -161,8 +161,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
+ int ret;
+
policy->clk = arm_clk;
- return cpufreq_generic_init(policy, freq_table, transition_latency);
+ ret = cpufreq_generic_init(policy, freq_table, transition_latency);
+ policy->suspend_freq = policy->max;
+
+ return ret;
}
static struct cpufreq_driver imx6q_cpufreq_driver = {
@@ -173,6 +178,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.init = imx6q_cpufreq_init,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
+ .suspend = cpufreq_generic_suspend,
};
static int imx6q_cpufreq_probe(struct platform_device *pdev)
@@ -222,6 +228,13 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
arm_reg = regulator_get(cpu_dev, "arm");
pu_reg = regulator_get_optional(cpu_dev, "pu");
soc_reg = regulator_get(cpu_dev, "soc");
+ if (PTR_ERR(arm_reg) == -EPROBE_DEFER ||
+ PTR_ERR(soc_reg) == -EPROBE_DEFER ||
+ PTR_ERR(pu_reg) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ dev_dbg(cpu_dev, "regulators not ready, defer\n");
+ goto put_reg;
+ }
if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
dev_err(cpu_dev, "failed to get regulators\n");
ret = -ENOENT;
@@ -255,7 +268,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto put_reg;
+ goto out_free_opp;
}
/* Make imx6_soc_volt array's size same as arm opp number */
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 283491f742d3..b7de5bd76a31 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,7 +37,11 @@
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
+#define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
+#define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC)
+
#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
+#define INTEL_CPUFREQ_TRANSITION_DELAY 500
#ifdef CONFIG_ACPI
#include <acpi/processor.h>
@@ -74,6 +78,11 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
+static inline int32_t percent_fp(int percent)
+{
+ return div_fp(percent, 100);
+}
+
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
@@ -186,45 +195,22 @@ struct _pid {
};
/**
- * struct perf_limits - Store user and policy limits
- * @no_turbo: User requested turbo state from intel_pstate sysfs
- * @turbo_disabled: Platform turbo status either from msr
- * MSR_IA32_MISC_ENABLE or when maximum available pstate
- * matches the maximum turbo pstate
- * @max_perf_pct: Effective maximum performance limit in percentage, this
- * is minimum of either limits enforced by cpufreq policy
- * or limits from user set limits via intel_pstate sysfs
- * @min_perf_pct: Effective minimum performance limit in percentage, this
- * is maximum of either limits enforced by cpufreq policy
- * or limits from user set limits via intel_pstate sysfs
- * @max_perf: This is a scaled value between 0 to 255 for max_perf_pct
- * This value is used to limit max pstate
- * @min_perf: This is a scaled value between 0 to 255 for min_perf_pct
- * This value is used to limit min pstate
- * @max_policy_pct: The maximum performance in percentage enforced by
- * cpufreq setpolicy interface
- * @max_sysfs_pct: The maximum performance in percentage enforced by
- * intel pstate sysfs interface, unused when per cpu
- * controls are enforced
- * @min_policy_pct: The minimum performance in percentage enforced by
- * cpufreq setpolicy interface
- * @min_sysfs_pct: The minimum performance in percentage enforced by
- * intel pstate sysfs interface, unused when per cpu
- * controls are enforced
- *
- * Storage for user and policy defined limits.
+ * struct global_params - Global parameters, mostly tunable via sysfs.
+ * @no_turbo: Whether or not to use turbo P-states.
+ * @turbo_disabled: Whether or not turbo P-states are available at all,
+ * based on the MSR_IA32_MISC_ENABLE value and whether or
+ * not the maximum reported turbo P-state is different from
+ * the maximum reported non-turbo one.
+ * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
+ * P-state capacity.
+ * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
+ * P-state capacity.
*/
-struct perf_limits {
- int no_turbo;
- int turbo_disabled;
+struct global_params {
+ bool no_turbo;
+ bool turbo_disabled;
int max_perf_pct;
int min_perf_pct;
- int32_t max_perf;
- int32_t min_perf;
- int max_policy_pct;
- int max_sysfs_pct;
- int min_policy_pct;
- int min_sysfs_pct;
};
/**
@@ -245,9 +231,10 @@ struct perf_limits {
* @prev_cummulative_iowait: IO Wait time difference from last and
* current sample
* @sample: Storage for storing last Sample data
- * @perf_limits: Pointer to perf_limit unique to this CPU
- * Not all field in the structure are applicable
- * when per cpu controls are enforced
+ * @min_perf: Minimum capacity limit as a fraction of the maximum
+ * turbo P-state capacity.
+ * @max_perf: Maximum capacity limit as a fraction of the maximum
+ * turbo P-state capacity.
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
* @epp_powersave: Last saved HWP energy performance preference
@@ -279,7 +266,8 @@ struct cpudata {
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
- struct perf_limits *perf_limits;
+ int32_t min_perf;
+ int32_t max_perf;
#ifdef CONFIG_ACPI
struct acpi_processor_performance acpi_perf_data;
bool valid_pss_table;
@@ -324,7 +312,7 @@ struct pstate_adjust_policy {
* @get_scaling: Callback to get frequency scaling factor
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
- * @get_target_pstate: Callback to a function to calculate next P state to use
+ * @update_util: Active mode utilization update callback.
*
* Core and Atom CPU models have different way to get P State limits. This
* structure is used to store those callbacks.
@@ -337,43 +325,31 @@ struct pstate_funcs {
int (*get_scaling)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
- int32_t (*get_target_pstate)(struct cpudata *);
+ void (*update_util)(struct update_util_data *data, u64 time,
+ unsigned int flags);
};
-/**
- * struct cpu_defaults- Per CPU model default config data
- * @pid_policy: PID config data
- * @funcs: Callback function data
- */
-struct cpu_defaults {
- struct pstate_adjust_policy pid_policy;
- struct pstate_funcs funcs;
+static struct pstate_funcs pstate_funcs __read_mostly;
+static struct pstate_adjust_policy pid_params __read_mostly = {
+ .sample_rate_ms = 10,
+ .sample_rate_ns = 10 * NSEC_PER_MSEC,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 20,
+ .d_gain_pct = 0,
+ .i_gain_pct = 0,
};
-static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
-static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
-
-static struct pstate_adjust_policy pid_params __read_mostly;
-static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;
-static bool driver_registered __read_mostly;
+static struct cpufreq_driver *intel_pstate_driver __read_mostly;
#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
-static struct perf_limits global;
-
-static void intel_pstate_init_limits(struct perf_limits *limits)
-{
- memset(limits, 0, sizeof(*limits));
- limits->max_perf_pct = 100;
- limits->max_perf = int_ext_tofp(1);
- limits->max_policy_pct = 100;
- limits->max_sysfs_pct = 100;
-}
+static struct global_params global;
static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);
@@ -530,29 +506,6 @@ static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
}
#endif
-static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
- int deadband, int integral) {
- pid->setpoint = int_tofp(setpoint);
- pid->deadband = int_tofp(deadband);
- pid->integral = int_tofp(integral);
- pid->last_err = int_tofp(setpoint) - int_tofp(busy);
-}
-
-static inline void pid_p_gain_set(struct _pid *pid, int percent)
-{
- pid->p_gain = div_fp(percent, 100);
-}
-
-static inline void pid_i_gain_set(struct _pid *pid, int percent)
-{
- pid->i_gain = div_fp(percent, 100);
-}
-
-static inline void pid_d_gain_set(struct _pid *pid, int percent)
-{
- pid->d_gain = div_fp(percent, 100);
-}
-
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
signed int result;
@@ -590,23 +543,17 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
return (signed int)fp_toint(result);
}
-static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
-{
- pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
- pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
- pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
-
- pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
-}
-
-static inline void intel_pstate_reset_all_pid(void)
+static inline void intel_pstate_pid_reset(struct cpudata *cpu)
{
- unsigned int cpu;
+ struct _pid *pid = &cpu->pid;
- for_each_online_cpu(cpu) {
- if (all_cpu_data[cpu])
- intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
- }
+ pid->p_gain = percent_fp(pid_params.p_gain_pct);
+ pid->d_gain = percent_fp(pid_params.d_gain_pct);
+ pid->i_gain = percent_fp(pid_params.i_gain_pct);
+ pid->setpoint = int_tofp(pid_params.setpoint);
+ pid->last_err = pid->setpoint - int_tofp(100);
+ pid->deadband = int_tofp(pid_params.deadband);
+ pid->integral = 0;
}
static inline void update_turbo_state(void)
@@ -621,6 +568,14 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
+static int min_perf_pct_min(void)
+{
+ struct cpudata *cpu = all_cpu_data[0];
+
+ return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
+ cpu->pstate.turbo_pstate);
+}
+
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
u64 epb;
@@ -838,96 +793,80 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
-static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
+static void intel_pstate_hwp_set(unsigned int cpu)
{
- int min, hw_min, max, hw_max, cpu;
- struct perf_limits *perf_limits = &global;
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+ int min, hw_min, max, hw_max;
u64 value, cap;
+ s16 epp;
- for_each_cpu(cpu, policy->cpus) {
- struct cpudata *cpu_data = all_cpu_data[cpu];
- s16 epp;
-
- if (per_cpu_limits)
- perf_limits = all_cpu_data[cpu]->perf_limits;
-
- rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
- hw_min = HWP_LOWEST_PERF(cap);
- if (global.no_turbo)
- hw_max = HWP_GUARANTEED_PERF(cap);
- else
- hw_max = HWP_HIGHEST_PERF(cap);
-
- max = fp_ext_toint(hw_max * perf_limits->max_perf);
- if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
- min = max;
- else
- min = fp_ext_toint(hw_max * perf_limits->min_perf);
+ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ if (global.no_turbo)
+ hw_max = HWP_GUARANTEED_PERF(cap);
+ else
+ hw_max = HWP_HIGHEST_PERF(cap);
- rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ max = fp_ext_toint(hw_max * cpu_data->max_perf);
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min = max;
+ else
+ min = fp_ext_toint(hw_max * cpu_data->min_perf);
- value &= ~HWP_MIN_PERF(~0L);
- value |= HWP_MIN_PERF(min);
+ rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- value &= ~HWP_MAX_PERF(~0L);
- value |= HWP_MAX_PERF(max);
+ value &= ~HWP_MIN_PERF(~0L);
+ value |= HWP_MIN_PERF(min);
- if (cpu_data->epp_policy == cpu_data->policy)
- goto skip_epp;
+ value &= ~HWP_MAX_PERF(~0L);
+ value |= HWP_MAX_PERF(max);
- cpu_data->epp_policy = cpu_data->policy;
+ if (cpu_data->epp_policy == cpu_data->policy)
+ goto skip_epp;
- if (cpu_data->epp_saved >= 0) {
- epp = cpu_data->epp_saved;
- cpu_data->epp_saved = -EINVAL;
- goto update_epp;
- }
+ cpu_data->epp_policy = cpu_data->policy;
- if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
- epp = intel_pstate_get_epp(cpu_data, value);
- cpu_data->epp_powersave = epp;
- /* If EPP read was failed, then don't try to write */
- if (epp < 0)
- goto skip_epp;
+ if (cpu_data->epp_saved >= 0) {
+ epp = cpu_data->epp_saved;
+ cpu_data->epp_saved = -EINVAL;
+ goto update_epp;
+ }
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ epp = intel_pstate_get_epp(cpu_data, value);
+ cpu_data->epp_powersave = epp;
+ /* If EPP read was failed, then don't try to write */
+ if (epp < 0)
+ goto skip_epp;
- epp = 0;
- } else {
- /* skip setting EPP, when saved value is invalid */
- if (cpu_data->epp_powersave < 0)
- goto skip_epp;
+ epp = 0;
+ } else {
+ /* skip setting EPP, when saved value is invalid */
+ if (cpu_data->epp_powersave < 0)
+ goto skip_epp;
- /*
- * No need to restore EPP when it is not zero. This
- * means:
- * - Policy is not changed
- * - user has manually changed
- * - Error reading EPB
- */
- epp = intel_pstate_get_epp(cpu_data, value);
- if (epp)
- goto skip_epp;
+ /*
+ * No need to restore EPP when it is not zero. This
+ * means:
+ * - Policy is not changed
+ * - user has manually changed
+ * - Error reading EPB
+ */
+ epp = intel_pstate_get_epp(cpu_data, value);
+ if (epp)
+ goto skip_epp;
- epp = cpu_data->epp_powersave;
- }
+ epp = cpu_data->epp_powersave;
+ }
update_epp:
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
- }
-skip_epp:
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ } else {
+ intel_pstate_set_epb(cpu, epp);
}
-}
-
-static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
-{
- if (hwp_active)
- intel_pstate_hwp_set(policy);
-
- return 0;
+skip_epp:
+ wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
@@ -944,20 +883,17 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
- int ret;
-
if (!hwp_active)
return 0;
mutex_lock(&intel_pstate_limits_lock);
all_cpu_data[policy->cpu]->epp_policy = 0;
-
- ret = intel_pstate_hwp_set_policy(policy);
+ intel_pstate_hwp_set(policy->cpu);
mutex_unlock(&intel_pstate_limits_lock);
- return ret;
+ return 0;
}
static void intel_pstate_update_policies(void)
@@ -971,9 +907,14 @@ static void intel_pstate_update_policies(void)
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
+ unsigned int cpu;
+
*(u32 *)data = val;
pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
- intel_pstate_reset_all_pid();
+ for_each_possible_cpu(cpu)
+ if (all_cpu_data[cpu])
+ intel_pstate_pid_reset(all_cpu_data[cpu]);
+
return 0;
}
@@ -1084,7 +1025,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
mutex_lock(&intel_pstate_driver_lock);
- if (!driver_registered) {
+ if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
@@ -1109,7 +1050,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
mutex_lock(&intel_pstate_driver_lock);
- if (!driver_registered) {
+ if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
@@ -1129,7 +1070,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
mutex_lock(&intel_pstate_driver_lock);
- if (!driver_registered) {
+ if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
@@ -1157,7 +1098,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
mutex_lock(&intel_pstate_driver_lock);
- if (!driver_registered) {
+ if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
@@ -1174,6 +1115,15 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
global.no_turbo = clamp_t(int, input, 0, 1);
+ if (global.no_turbo) {
+ struct cpudata *cpu = all_cpu_data[0];
+ int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
+
+ /* Squash the global minimum into the permitted range. */
+ if (global.min_perf_pct > pct)
+ global.min_perf_pct = pct;
+ }
+
mutex_unlock(&intel_pstate_limits_lock);
intel_pstate_update_policies();
@@ -1195,18 +1145,14 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
mutex_lock(&intel_pstate_driver_lock);
- if (!driver_registered) {
+ if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
mutex_lock(&intel_pstate_limits_lock);
- global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
- global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
- global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
- global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
- global.max_perf = percent_ext_fp(global.max_perf_pct);
+ global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
mutex_unlock(&intel_pstate_limits_lock);
@@ -1229,18 +1175,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
mutex_lock(&intel_pstate_driver_lock);
- if (!driver_registered) {
+ if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
mutex_lock(&intel_pstate_limits_lock);
- global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
- global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
- global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
- global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
- global.min_perf = percent_ext_fp(global.min_perf_pct);
+ global.min_perf_pct = clamp_t(int, input,
+ min_perf_pct_min(), global.max_perf_pct);
mutex_unlock(&intel_pstate_limits_lock);
@@ -1554,132 +1497,10 @@ static int knl_get_turbo_pstate(void)
return ret;
}
-static struct cpu_defaults core_params = {
- .pid_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 97,
- .p_gain_pct = 20,
- .d_gain_pct = 0,
- .i_gain_pct = 0,
- },
- .funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = core_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
- .get_target_pstate = get_target_pstate_use_performance,
- },
-};
-
-static const struct cpu_defaults silvermont_params = {
- .pid_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 60,
- .p_gain_pct = 14,
- .d_gain_pct = 0,
- .i_gain_pct = 4,
- },
- .funcs = {
- .get_max = atom_get_max_pstate,
- .get_max_physical = atom_get_max_pstate,
- .get_min = atom_get_min_pstate,
- .get_turbo = atom_get_turbo_pstate,
- .get_val = atom_get_val,
- .get_scaling = silvermont_get_scaling,
- .get_vid = atom_get_vid,
- .get_target_pstate = get_target_pstate_use_cpu_load,
- },
-};
-
-static const struct cpu_defaults airmont_params = {
- .pid_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 60,
- .p_gain_pct = 14,
- .d_gain_pct = 0,
- .i_gain_pct = 4,
- },
- .funcs = {
- .get_max = atom_get_max_pstate,
- .get_max_physical = atom_get_max_pstate,
- .get_min = atom_get_min_pstate,
- .get_turbo = atom_get_turbo_pstate,
- .get_val = atom_get_val,
- .get_scaling = airmont_get_scaling,
- .get_vid = atom_get_vid,
- .get_target_pstate = get_target_pstate_use_cpu_load,
- },
-};
-
-static const struct cpu_defaults knl_params = {
- .pid_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 97,
- .p_gain_pct = 20,
- .d_gain_pct = 0,
- .i_gain_pct = 0,
- },
- .funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = knl_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
- .get_target_pstate = get_target_pstate_use_performance,
- },
-};
-
-static const struct cpu_defaults bxt_params = {
- .pid_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 60,
- .p_gain_pct = 14,
- .d_gain_pct = 0,
- .i_gain_pct = 4,
- },
- .funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = core_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
- .get_target_pstate = get_target_pstate_use_cpu_load,
- },
-};
-
-static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+static int intel_pstate_get_base_pstate(struct cpudata *cpu)
{
- int max_perf = cpu->pstate.turbo_pstate;
- int max_perf_adj;
- int min_perf;
- struct perf_limits *perf_limits = &global;
-
- if (global.no_turbo || global.turbo_disabled)
- max_perf = cpu->pstate.max_pstate;
-
- if (per_cpu_limits)
- perf_limits = cpu->perf_limits;
-
- /*
- * performance can be limited by user through sysfs, by cpufreq
- * policy, or by cpu specific default values determined through
- * experimentation.
- */
- max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
- *max = clamp_t(int, max_perf_adj,
- cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
-
- min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
- *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+ return global.no_turbo || global.turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
@@ -1702,11 +1523,13 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
- int min_pstate, max_pstate;
+ int pstate;
update_turbo_state();
- intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
- intel_pstate_set_pstate(cpu, max_pstate);
+ pstate = intel_pstate_get_base_pstate(cpu);
+ pstate = max(cpu->pstate.min_pstate,
+ fp_ext_toint(pstate * cpu->max_perf));
+ intel_pstate_set_pstate(cpu, pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@ -1767,7 +1590,11 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- return !!cpu->last_sample_time;
+ if (cpu->last_sample_time) {
+ intel_pstate_calc_avg_perf(cpu);
+ return true;
+ }
+ return false;
}
static inline int32_t get_avg_frequency(struct cpudata *cpu)
@@ -1788,6 +1615,9 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
int32_t busy_frac, boost;
int target, avg_pstate;
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return cpu->pstate.turbo_pstate;
+
busy_frac = div_fp(sample->mperf, sample->tsc);
boost = cpu->iowait_boost;
@@ -1824,6 +1654,9 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
u64 duration_ns;
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return cpu->pstate.turbo_pstate;
+
/*
* perf_scaled is the ratio of the average P-state during the last
* sampling period to the P-state requested last time (in percent).
@@ -1858,11 +1691,13 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
- int max_perf, min_perf;
+ int max_pstate = intel_pstate_get_base_pstate(cpu);
+ int min_pstate;
- intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
- pstate = clamp_t(int, pstate, min_perf, max_perf);
- return pstate;
+ min_pstate = max(cpu->pstate.min_pstate,
+ fp_ext_toint(max_pstate * cpu->min_perf));
+ max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
+ return clamp_t(int, pstate, min_pstate, max_pstate);
}
static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
@@ -1874,16 +1709,11 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
-static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
{
- int from, target_pstate;
+ int from = cpu->pstate.current_pstate;
struct sample *sample;
- from = cpu->pstate.current_pstate;
-
- target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
- cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
-
update_turbo_state();
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
@@ -1902,76 +1732,155 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
fp_toint(cpu->iowait_boost * 100));
}
+static void intel_pstate_update_util_hwp(struct update_util_data *data,
+ u64 time, unsigned int flags)
+{
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+ u64 delta_ns = time - cpu->sample.time;
+
+ if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
+ intel_pstate_sample(cpu, time);
+}
+
+static void intel_pstate_update_util_pid(struct update_util_data *data,
+ u64 time, unsigned int flags)
+{
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+ u64 delta_ns = time - cpu->sample.time;
+
+ if ((s64)delta_ns < pid_params.sample_rate_ns)
+ return;
+
+ if (intel_pstate_sample(cpu, time)) {
+ int target_pstate;
+
+ target_pstate = get_target_pstate_use_performance(cpu);
+ intel_pstate_adjust_pstate(cpu, target_pstate);
+ }
+}
+
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
unsigned int flags)
{
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
u64 delta_ns;
- if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
- if (flags & SCHED_CPUFREQ_IOWAIT) {
- cpu->iowait_boost = int_tofp(1);
- } else if (cpu->iowait_boost) {
- /* Clear iowait_boost if the CPU may have been idle. */
- delta_ns = time - cpu->last_update;
- if (delta_ns > TICK_NSEC)
- cpu->iowait_boost = 0;
- }
- cpu->last_update = time;
+ if (flags & SCHED_CPUFREQ_IOWAIT) {
+ cpu->iowait_boost = int_tofp(1);
+ } else if (cpu->iowait_boost) {
+ /* Clear iowait_boost if the CPU may have been idle. */
+ delta_ns = time - cpu->last_update;
+ if (delta_ns > TICK_NSEC)
+ cpu->iowait_boost = 0;