Diffstat (limited to 'drivers')
-rw-r--r--   drivers/cpufreq/Kconfig                  |   1
-rw-r--r--   drivers/cpufreq/acpi-cpufreq.c           | 212
-rw-r--r--   drivers/cpufreq/amd_freq_sensitivity.c   |   8
-rw-r--r--   drivers/cpufreq/cpufreq-dt.c             | 300
-rw-r--r--   drivers/cpufreq/cpufreq.c                | 333
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c   | 282
-rw-r--r--   drivers/cpufreq/cpufreq_governor.c       | 766
-rw-r--r--   drivers/cpufreq/cpufreq_governor.h       | 261
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c       | 445
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.h       |  30
-rw-r--r--   drivers/cpufreq/cpufreq_performance.c    |  18
-rw-r--r--   drivers/cpufreq/cpufreq_powersave.c      |  10
-rw-r--r--   drivers/cpufreq/cpufreq_userspace.c      |  10
-rw-r--r--   drivers/cpufreq/intel_pstate.c           | 192
-rw-r--r--   drivers/cpufreq/powernv-cpufreq.c        | 152
15 files changed, 1344 insertions, 1676 deletions
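
The largest acpi-cpufreq change below replaces the per-call switch on data->cpu_feature with read/write function pointers chosen once at initialization. As a standalone illustration of that dispatch pattern (plain C, not kernel code; the register struct and the MSR accessors here are hypothetical stubs):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the ACPI pct control register. */
struct pct_register {
        uint32_t address;
        uint8_t bit_width;
};

/* Accessors picked once at init, instead of a switch in every call. */
struct freq_ops {
        uint32_t (*read)(struct pct_register *reg);
        void (*write)(struct pct_register *reg, uint32_t val);
};

static uint32_t fake_msr = 0x1a00;

static uint32_t read_msr(struct pct_register *reg)
{
        (void)reg;      /* MSR variants ignore the register argument */
        return fake_msr;
}

static void write_msr(struct pct_register *reg, uint32_t val)
{
        (void)reg;
        fake_msr = val;
}

int main(void)
{
        /* "cpu_init" time: select the accessors for this CPU type once. */
        struct freq_ops ops = { .read = read_msr, .write = write_msr };
        struct pct_register reg = { 0, 0 };

        /* Hot path: no switch, just an indirect call. */
        ops.write(&reg, 0x1b00);
        printf("perf ctl = 0x%x\n", ops.read(&reg));
        return 0;
}
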
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index f93511031177..a7f45853c103 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -19,6 +19,7 @@ config CPU_FREQ
 if CPU_FREQ

 config CPU_FREQ_GOV_COMMON
+        select IRQ_WORK
         bool

 config CPU_FREQ_BOOST_SW
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 51eef87bbc37..59a7b380fbe2 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
         unsigned int cpu_feature;
         unsigned int acpi_perf_cpu;
         cpumask_var_t freqdomain_cpus;
+        void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
+        u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
 };

 /* acpi_perf_data is a pointer to percpu data. */
@@ -243,125 +245,119 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
         }
 }

-struct msr_addr {
-        u32 reg;
-};
+u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
+{
+        u32 val, dummy;

-struct io_addr {
-        u16 port;
-        u8 bit_width;
-};
+        rdmsr(MSR_IA32_PERF_CTL, val, dummy);
+        return val;
+}
+
+void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
+{
+        u32 lo, hi;
+
+        rdmsr(MSR_IA32_PERF_CTL, lo, hi);
+        lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
+        wrmsr(MSR_IA32_PERF_CTL, lo, hi);
+}
+
+u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
+{
+        u32 val, dummy;
+
+        rdmsr(MSR_AMD_PERF_CTL, val, dummy);
+        return val;
+}
+
+void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
+{
+        wrmsr(MSR_AMD_PERF_CTL, val, 0);
+}
+
+u32 cpu_freq_read_io(struct acpi_pct_register *reg)
+{
+        u32 val;
+
+        acpi_os_read_port(reg->address, &val, reg->bit_width);
+        return val;
+}
+
+void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
+{
+        acpi_os_write_port(reg->address, val, reg->bit_width);
+}

 struct drv_cmd {
-        unsigned int type;
-        const struct cpumask *mask;
-        union {
-                struct msr_addr msr;
-                struct io_addr io;
-        } addr;
+        struct acpi_pct_register *reg;
         u32 val;
+        union {
+                void (*write)(struct acpi_pct_register *reg, u32 val);
+                u32 (*read)(struct acpi_pct_register *reg);
+        } func;
 };

 /* Called via smp_call_function_single(), on the target CPU */
 static void do_drv_read(void *_cmd)
 {
         struct drv_cmd *cmd = _cmd;
-        u32 h;

-        switch (cmd->type) {
-        case SYSTEM_INTEL_MSR_CAPABLE:
-        case SYSTEM_AMD_MSR_CAPABLE:
-                rdmsr(cmd->addr.msr.reg, cmd->val, h);
-                break;
-        case SYSTEM_IO_CAPABLE:
-                acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
-                                &cmd->val,
-                                (u32)cmd->addr.io.bit_width);
-                break;
-        default:
-                break;
-        }
+        cmd->val = cmd->func.read(cmd->reg);
 }

-/* Called via smp_call_function_many(), on the target CPUs */
-static void do_drv_write(void *_cmd)
+static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
 {
-        struct drv_cmd *cmd = _cmd;
-        u32 lo, hi;
+        struct acpi_processor_performance *perf = to_perf_data(data);
+        struct drv_cmd cmd = {
+                .reg = &perf->control_register,
+                .func.read = data->cpu_freq_read,
+        };
+        int err;

-        switch (cmd->type) {
-        case SYSTEM_INTEL_MSR_CAPABLE:
-                rdmsr(cmd->addr.msr.reg, lo, hi);
-                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
-                wrmsr(cmd->addr.msr.reg, lo, hi);
-                break;
-        case SYSTEM_AMD_MSR_CAPABLE:
-                wrmsr(cmd->addr.msr.reg, cmd->val, 0);
-                break;
-        case SYSTEM_IO_CAPABLE:
-                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
-                                cmd->val,
-                                (u32)cmd->addr.io.bit_width);
-                break;
-        default:
-                break;
-        }
+        err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
+        WARN_ON_ONCE(err);        /* smp_call_function_any() was buggy? */
+
+        return cmd.val;
 }

-static void drv_read(struct drv_cmd *cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
-        int err;
-        cmd->val = 0;
+        struct drv_cmd *cmd = _cmd;

-        err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
-        WARN_ON_ONCE(err);        /* smp_call_function_any() was buggy? */
+        cmd->func.write(cmd->reg, cmd->val);
 }

-static void drv_write(struct drv_cmd *cmd)
+static void drv_write(struct acpi_cpufreq_data *data,
+                      const struct cpumask *mask, u32 val)
 {
+        struct acpi_processor_performance *perf = to_perf_data(data);
+        struct drv_cmd cmd = {
+                .reg = &perf->control_register,
+                .val = val,
+                .func.write = data->cpu_freq_write,
+        };
         int this_cpu;

         this_cpu = get_cpu();
-        if (cpumask_test_cpu(this_cpu, cmd->mask))
-                do_drv_write(cmd);
-        smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+        if (cpumask_test_cpu(this_cpu, mask))
+                do_drv_write(&cmd);
+
+        smp_call_function_many(mask, do_drv_write, &cmd, 1);
         put_cpu();
 }

-static u32
-get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
+static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
 {
-        struct acpi_processor_performance *perf;
-        struct drv_cmd cmd;
+        u32 val;

         if (unlikely(cpumask_empty(mask)))
                 return 0;

-        switch (data->cpu_feature) {
-        case SYSTEM_INTEL_MSR_CAPABLE:
-                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-                break;
-        case SYSTEM_AMD_MSR_CAPABLE:
-                cmd.type = SYSTEM_AMD_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
-                break;
-        case SYSTEM_IO_CAPABLE:
-                cmd.type = SYSTEM_IO_CAPABLE;
-                perf = to_perf_data(data);
-                cmd.addr.io.port = perf->control_register.address;
-                cmd.addr.io.bit_width = perf->control_register.bit_width;
-                break;
-        default:
-                return 0;
-        }
-
-        cmd.mask = mask;
-        drv_read(&cmd);
+        val = drv_read(data, mask);

-        pr_debug("get_cur_val = %u\n", cmd.val);
+        pr_debug("get_cur_val = %u\n", val);

-        return cmd.val;
+        return val;
 }

 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
@@ -416,7 +412,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 {
         struct acpi_cpufreq_data *data = policy->driver_data;
         struct acpi_processor_performance *perf;
-        struct drv_cmd cmd;
+        const struct cpumask *mask;
         unsigned int next_perf_state = 0; /* Index into perf table */
         int result = 0;

@@ -434,42 +430,21 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                 } else {
                         pr_debug("Already at target state (P%d)\n",
                                 next_perf_state);
-                        goto out;
+                        return 0;
                 }
         }

-        switch (data->cpu_feature) {
-        case SYSTEM_INTEL_MSR_CAPABLE:
-                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-                cmd.val = (u32) perf->states[next_perf_state].control;
-                break;
-        case SYSTEM_AMD_MSR_CAPABLE:
-                cmd.type = SYSTEM_AMD_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
-                cmd.val = (u32) perf->states[next_perf_state].control;
-                break;
-        case SYSTEM_IO_CAPABLE:
-                cmd.type = SYSTEM_IO_CAPABLE;
-                cmd.addr.io.port = perf->control_register.address;
-                cmd.addr.io.bit_width = perf->control_register.bit_width;
-                cmd.val = (u32) perf->states[next_perf_state].control;
-                break;
-        default:
-                result = -ENODEV;
-                goto out;
-        }
-
-        /* cpufreq holds the hotplug lock, so we are safe from here on */
-        if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-                cmd.mask = policy->cpus;
-        else
-                cmd.mask = cpumask_of(policy->cpu);
+        /*
+         * The core won't allow CPUs to go away until the governor has been
+         * stopped, so we can rely on the stability of policy->cpus.
+         */
+        mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
+                cpumask_of(policy->cpu) : policy->cpus;

-        drv_write(&cmd);
+        drv_write(data, mask, perf->states[next_perf_state].control);

         if (acpi_pstate_strict) {
-                if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+                if (!check_freqs(mask, data->freq_table[index].frequency,
                                         data)) {
                         pr_debug("acpi_cpufreq_target failed (%d)\n",
                                 policy->cpu);
@@ -480,7 +455,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         if (!result)
                 perf->state = next_perf_state;

-out:
         return result;
 }

@@ -740,15 +714,21 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 }
                 pr_debug("SYSTEM IO addr space\n");
                 data->cpu_feature = SYSTEM_IO_CAPABLE;
+                data->cpu_freq_read = cpu_freq_read_io;
+                data->cpu_freq_write = cpu_freq_write_io;
                 break;
         case ACPI_ADR_SPACE_FIXED_HARDWARE:
                 pr_debug("HARDWARE addr space\n");
                 if (check_est_cpu(cpu)) {
                         data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+                        data->cpu_freq_read = cpu_freq_read_intel;
+                        data->cpu_freq_write = cpu_freq_write_intel;
                         break;
                 }
                 if (check_amd_hwpstate_cpu(cpu)) {
                         data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+                        data->cpu_freq_read = cpu_freq_read_amd;
+                        data->cpu_freq_write = cpu_freq_write_amd;
                         break;
                 }
                 result = -ENODEV;
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index f6b79ab0070b..404360cad25c 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -21,7 +21,7 @@
 #include <asm/msr.h>
 #include <asm/cpufeature.h>

-#include "cpufreq_governor.h"
+#include "cpufreq_ondemand.h"

 #define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL       0xc0010080
 #define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE    0xc0010081
@@ -45,10 +45,10 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
         long d_actual, d_reference;
         struct msr actual, reference;
         struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
-        struct dbs_data *od_data = policy->governor_data;
+        struct policy_dbs_info *policy_dbs = policy->governor_data;
+        struct dbs_data *od_data = policy_dbs->dbs_data;
         struct od_dbs_tuners *od_tuners = od_data->tuners;
-        struct od_cpu_dbs_info_s *od_info =
-                od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
+        struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);

         if (!od_info->freq_table)
                 return freq_next;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 0ca74d070058..f951f911786e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -31,9 +31,8 @@

 struct private_data {
         struct device *cpu_dev;
-        struct regulator *cpu_reg;
         struct thermal_cooling_device *cdev;
-        unsigned int voltage_tolerance; /* in percentage */
+        const char *reg_name;
 };

 static struct freq_attr *cpufreq_dt_attr[] = {
@@ -44,175 +43,128 @@ static struct freq_attr *cpufreq_dt_attr[] = {

 static int set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-        struct dev_pm_opp *opp;
-        struct cpufreq_frequency_table *freq_table = policy->freq_table;
-        struct clk *cpu_clk = policy->clk;
         struct private_data *priv = policy->driver_data;
-        struct device *cpu_dev = priv->cpu_dev;
-        struct regulator *cpu_reg = priv->cpu_reg;
-        unsigned long volt = 0, tol = 0;
-        int volt_old = 0;
-        unsigned int old_freq, new_freq;
-        long freq_Hz, freq_exact;
-        int ret;
-
-        freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
-        if (freq_Hz <= 0)
-                freq_Hz = freq_table[index].frequency * 1000;
-        freq_exact = freq_Hz;
-        new_freq = freq_Hz / 1000;
-        old_freq = clk_get_rate(cpu_clk) / 1000;
+        return dev_pm_opp_set_rate(priv->cpu_dev,
+                                   policy->freq_table[index].frequency * 1000);
+}

-        if (!IS_ERR(cpu_reg)) {
-                unsigned long opp_freq;
+/*
+ * An earlier version of opp-v1 bindings used to name the regulator
+ * "cpu0-supply", we still need to handle that for backwards compatibility.
+ */
+static const char *find_supply_name(struct device *dev)
+{
+        struct device_node *np;
+        struct property *pp;
+        int cpu = dev->id;
+        const char *name = NULL;

-                rcu_read_lock();
-                opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
-                if (IS_ERR(opp)) {
-                        rcu_read_unlock();
-                        dev_err(cpu_dev, "failed to find OPP for %ld\n",
-                                freq_Hz);
-                        return PTR_ERR(opp);
-                }
-                volt = dev_pm_opp_get_voltage(opp);
-                opp_freq = dev_pm_opp_get_freq(opp);
-                rcu_read_unlock();
-                tol = volt * priv->voltage_tolerance / 100;
-                volt_old = regulator_get_voltage(cpu_reg);
-                dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
-                        opp_freq / 1000, volt);
-        }
+        np = of_node_get(dev->of_node);

-        dev_dbg(cpu_dev, "%u MHz, %d mV --> %u MHz, %ld mV\n",
-                old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
-                new_freq / 1000, volt ? volt / 1000 : -1);
+        /* This must be valid for sure */
+        if (WARN_ON(!np))
+                return NULL;

-        /* scaling up? scale voltage before frequency */
-        if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
-                ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
-                if (ret) {
-                        dev_err(cpu_dev, "failed to scale voltage up: %d\n",
-                                ret);
-                        return ret;
+        /* Try "cpu0" for older DTs */
+        if (!cpu) {
+                pp = of_find_property(np, "cpu0-supply", NULL);
+                if (pp) {
+                        name = "cpu0";
+                        goto node_put;
                 }
         }

-        ret = clk_set_rate(cpu_clk, freq_exact);
-        if (ret) {
-                dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-                if (!IS_ERR(cpu_reg) && volt_old > 0)
-                        regulator_set_voltage_tol(cpu_reg, volt_old, tol);
-                return ret;
+        pp = of_find_property(np, "cpu-supply", NULL);
+        if (pp) {
+                name = "cpu";
+                goto node_put;
         }

-        /* scaling down? scale voltage after frequency */
-        if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
-                ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
-                if (ret) {
-                        dev_err(cpu_dev, "failed to scale voltage down: %d\n",
-                                ret);
-                        clk_set_rate(cpu_clk, old_freq * 1000);
-                }
-        }
-
-        return ret;
+        dev_dbg(dev, "no regulator for cpu%d\n", cpu);
+node_put:
+        of_node_put(np);
+        return name;
 }

-static int allocate_resources(int cpu, struct device **cdev,
-                              struct regulator **creg, struct clk **cclk)
+static int resources_available(void)
 {
         struct device *cpu_dev;
         struct regulator *cpu_reg;
         struct clk *cpu_clk;
         int ret = 0;
-        char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
+        const char *name;

-        cpu_dev = get_cpu_device(cpu);
+        cpu_dev = get_cpu_device(0);
         if (!cpu_dev) {
-                pr_err("failed to get cpu%d device\n", cpu);
+                pr_err("failed to get cpu0 device\n");
                 return -ENODEV;
         }

-        /* Try "cpu0" for older DTs */
-        if (!cpu)
-                reg = reg_cpu0;
-        else
-                reg = reg_cpu;
-
-try_again:
-        cpu_reg = regulator_get_optional(cpu_dev, reg);
-        ret = PTR_ERR_OR_ZERO(cpu_reg);
+        cpu_clk = clk_get(cpu_dev, NULL);
+        ret = PTR_ERR_OR_ZERO(cpu_clk);
         if (ret) {
                 /*
-                 * If cpu's regulator supply node is present, but regulator is
-                 * not yet registered, we should try defering probe.
+                 * If cpu's clk node is present, but clock is not yet
+                 * registered, we should try defering probe.
                  */
-                if (ret == -EPROBE_DEFER) {
-                        dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
-                                cpu);
-                        return ret;
-                }
-
-                /* Try with "cpu-supply" */
-                if (reg == reg_cpu0) {
-                        reg = reg_cpu;
-                        goto try_again;
-                }
+                if (ret == -EPROBE_DEFER)
+                        dev_dbg(cpu_dev, "clock not ready, retry\n");
+                else
+                        dev_err(cpu_dev, "failed to get clock: %d\n", ret);

-                dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
+                return ret;
         }

-        cpu_clk = clk_get(cpu_dev, NULL);
-        ret = PTR_ERR_OR_ZERO(cpu_clk);
-        if (ret) {
-                /* put regulator */
-                if (!IS_ERR(cpu_reg))
-                        regulator_put(cpu_reg);
+        clk_put(cpu_clk);

+        name = find_supply_name(cpu_dev);
+        /* Platform doesn't require regulator */
+        if (!name)
+                return 0;
+
+        cpu_reg = regulator_get_optional(cpu_dev, name);
+        ret = PTR_ERR_OR_ZERO(cpu_reg);
+        if (ret) {
                 /*
-                 * If cpu's clk node is present, but clock is not yet
-                 * registered, we should try defering probe.
+                 * If cpu's regulator supply node is present, but regulator is
+                 * not yet registered, we should try defering probe.
                  */
                 if (ret == -EPROBE_DEFER)
-                        dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
+                        dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
                 else
-                        dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
-                                ret);
-        } else {
-                *cdev = cpu_dev;
-                *creg = cpu_reg;
-                *cclk = cpu_clk;
+                        dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
+
+                return ret;
         }

-        return ret;
+        regulator_put(cpu_reg);
+        return 0;
 }

 static int cpufreq_init(struct cpufreq_policy *policy)
 {
         struct cpufreq_frequency_table *freq_table;
-        struct device_node *np;
         struct private_data *priv;
         struct device *cpu_dev;
-        struct regulator *cpu_reg;
         struct clk *cpu_clk;
         struct dev_pm_opp *suspend_opp;
-        unsigned long min_uV = ~0, max_uV = 0;
         unsigned int transition_latency;
-        bool need_update = false;
+        bool opp_v1 = false;
+        const char *name;
         int ret;

-        ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
-        if (ret) {
-                pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
-                return ret;
+        cpu_dev = get_cpu_device(policy->cpu);
+        if (!cpu_dev) {
+                pr_err("failed to get cpu%d device\n", policy->cpu);
+                return -ENODEV;
         }

-        np = of_node_get(cpu_dev->of_node);
-        if (!np) {
-                dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
-                ret = -ENOENT;
-                goto out_put_reg_clk;
+        cpu_clk = clk_get(cpu_dev, NULL);
+        if (IS_ERR(cpu_clk)) {
+                ret = PTR_ERR(cpu_clk);
+                dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
+                return ret;
         }

         /* Get OPP-sharing information from "operating-points-v2" bindings */
@@ -223,9 +175,23 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                  * finding shared-OPPs for backward compatibility.
                  */
                 if (ret == -ENOENT)
-                        need_update = true;
+                        opp_v1 = true;
                 else
-                        goto out_node_put;
+                        goto out_put_clk;
+        }
+
+        /*
+         * OPP layer will be taking care of regulators now, but it needs to know
+         * the name of the regulator first.
+         */
+        name = find_supply_name(cpu_dev);
+        if (name) {
+                ret = dev_pm_opp_set_regulator(cpu_dev, name);
+                if (ret) {
+                        dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
+                                policy->cpu, ret);
+                        goto out_put_clk;
+                }
         }

         /*
@@ -246,12 +212,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
          */
         ret = dev_pm_opp_get_opp_count(cpu_dev);
         if (ret <= 0) {
-                pr_debug("OPP table is not ready, deferring probe\n");
+                dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
                 ret = -EPROBE_DEFER;
                 goto out_free_opp;
         }

-        if (need_update) {
+        if (opp_v1) {
                 struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();

                 if (!pd || !pd->independent_clocks)
@@ -265,10 +231,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                 if (ret)
                         dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                 __func__, ret);
-
-                of_property_read_u32(np, "clock-latency", &transition_latency);
-        } else {
-                transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
         }

         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -277,62 +239,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                 goto out_free_opp;
         }

-        of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
-
-        if (!transition_latency)
-                transition_latency = CPUFREQ_ETERNAL;
-
-        if (!IS_ERR(cpu_reg)) {
-                unsigned long opp_freq = 0;
-
-                /*
-                 * Disable any OPPs where the connected regulator isn't able to
-                 * provide the specified voltage and record minimum and maximum
-                 * voltage levels.
-                 */
-                while (1) {
-                        struct dev_pm_opp *opp;
-                        unsigned long opp_uV, tol_uV;
-
-                        rcu_read_lock();
-                        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
-                        if (IS_ERR(opp)) {
-                                rcu_read_unlock();
-                                break;
-                        }
-                        opp_uV = dev_pm_opp_get_voltage(opp);
-                        rcu_read_unlock();
-
-                        tol_uV = opp_uV * priv->voltage_tolerance / 100;
-                        if (regulator_is_supported_voltage(cpu_reg,
-                                                           opp_uV - tol_uV,
-                                                           opp_uV + tol_uV)) {
-                                if (opp_uV < min_uV)
-                                        min_uV = opp_uV;
-                                if (opp_uV > max_uV)
-                                        max_uV = opp_uV;
-                        } else {
-                                dev_pm_opp_disable(cpu_dev, opp_freq);
-                        }
-
-                        opp_freq++;
-                }
-
-                ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
-                if (ret > 0)
-                        transition_latency += ret * 1000;
-        }
+        priv->reg_name = name;

         ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
         if (ret) {
-                pr_err("failed to init cpufreq table: %d\n", ret);
+                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                 goto out_free_priv;
         }

         priv->cpu_dev = cpu_dev;
-        priv->cpu_reg = cpu_reg;
         policy->driver_data = priv;
-
         policy->clk = cpu_clk;

         rcu_read_lock();
@@ -357,9 +273,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                 cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
         }

-        policy->cpuinfo.transition_latency = transition_latency;
+        transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+        if (!transition_latency)
+                transition_latency = CPUFREQ_ETERNAL;

-        of_node_put(np);
+        policy->cpuinfo.transition_latency = transition_latency;

         return 0;

@@ -369,12 +287,10 @@ out_free_priv:
         kfree(priv);
 out_free_opp:
         dev_pm_opp_of_cpumask_remove_table(policy->cpus);
-out_node_put:
-        of_node_put(np);
-out_put_reg_clk:
+        if (name)
+                dev_pm_opp_put_regulator(cpu_dev);
+out_put_clk:
         clk_put(cpu_clk);
-        if (!IS_ERR(cpu_reg))
-                regulator_put(cpu_reg);

         return ret;
 }
@@ -386,9 +302,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
         cpufreq_cooling_unregister(priv->cdev);
         dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
         dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+        if (priv->reg_name)
+                dev_pm_opp_put_regulator(priv->cpu_dev);
+
         clk_put(policy->clk);
-        if (!IS_ERR(priv->cpu_reg))
-                regulator_put(priv->cpu_reg);
         kfree(priv);

         return 0;
@@ -441,9 +358,6 @@ static struct cpufreq_driver dt_cpufreq_driver = {

 static int dt_cpufreq_probe(struct platform_device *pdev)
 {
-        struct device *cpu_dev;
-        struct regulator *cpu_reg;
-        struct clk *cpu_clk;
         int ret;

         /*
@@ -453,19 +367,15 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
          *
          * FIXME: Is checking this only for CPU0 sufficient ?
          */
-        ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+        ret = resources_available();
         if (ret)
                 return ret;

-        clk_put(cpu_clk);
-        if (!IS_ERR(cpu_reg))
-                regulator_put(cpu_reg);
-
         dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);

         ret = cpufreq_register_driver(&dt_cpufreq_driver);
         if (ret)
-                dev_err(cpu_dev, "failed register driver: %d\n", ret);
+                dev_err(&pdev->dev, "failed register driver: %d\n", ret);

         return ret;
 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e979ec78b695..4c7825856eab 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -38,48 +38,10 @@ static inline bool policy_is_inactive(struct cpufreq_policy *policy)
         return cpumask_empty(policy->cpus);
 }

-static bool suitable_policy(struct cpufreq_policy *policy, bool active)
-{
-        return active == !policy_is_inactive(policy);
-}
-
-/* Finds Next Acive/Inactive policy */
-static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
-                                          bool active)
-{
-        do {
-                /* No more policies in the list */
-                if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
-                        return NULL;
-
-                policy = list_next_entry(policy, policy_list);
-        } while (!suitable_policy(policy, active));
-
-        return policy;
-}
-
-static struct cpufreq_policy *first_policy(bool active)
-{
-        struct cpufreq_policy *policy;
-
-        /* No policies in the list */
-        if (list_empty(&cpufreq_policy_list))
-                return NULL;
-
-        policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
-                                  policy_list);
-
-        if (!suitable_policy(policy, active))
-                policy = next_policy(policy, active);
-
-        return policy;
-}
-
 /* Macros to iterate over CPU policies */
-#define for_each_suitable_policy(__policy, __active)   \
-        for (__policy = first_policy(__active);         \
-             __policy;                                  \
-             __policy = next_policy(__policy, __active))
+#define for_each_suitable_policy(__policy, __active)                    \
+        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
+                if ((__active) == !policy_is_inactive(__policy))

 #define for_each_active_policy(__policy)                \
         for_each_suitable_policy(__policy, true)
@@ -102,7 +64,6 @@ static LIST_HEAD(cpufreq_governor_list);
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
-DEFINE_MUTEX(cpufreq_governor_lock);

 /* Flag to suspend/resume CPUFreq governors */
 static bool cpufreq_suspended;
@@ -113,10 +74,8 @@ static inline bool has_target(void)
 }

 /* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy,
-                unsigned int event);
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
-static void handle_update(struct work_struct *work);

 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -818,12 +777,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
         ssize_t ret;

         down_read(&policy->rwsem);
-
-        if (fattr->show)
-                ret = fattr->show(policy, buf);
-        else
-                ret = -EIO;
-
+        ret = fattr->show(policy, buf);
         up_read(&policy->rwsem);

         return ret;
@@ -838,18 +792,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,

         get_online_cpus();

-        if (!cpu_online(policy->cpu))
-                goto unlock;
-
-        down_write(&policy->rwsem);
-
-        if (fattr->store)
+        if (cpu_online(policy->cpu)) {
+                down_write(&policy->rwsem);
                 ret = fattr->store(policy, buf, count);
-        else
-                ret = -EIO;
+                up_write(&policy->rwsem);
+        }

-        up_write(&policy->rwsem);
-unlock:
         put_online_cpus();

         return ret;
@@ -959,6 +907,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
         return cpufreq_add_dev_symlink(policy);
 }

+__weak struct cpufreq_governor *cpufreq_default_governor(void)
+{
+        return NULL;
+}
+
 static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
         struct cpufreq_governor *gov = NULL;
@@ -968,11 +921,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)

         /* Update governor of new_policy to the governor used before hotplug */
         gov = find_governor(policy->last_governor);
-        if (gov)
+        if (gov) {
                 pr_debug("Restoring governor %s for cpu %d\n",
                                 policy->governor->name, policy->cpu);
-        else
-                gov = CPUFREQ_DEFAULT_GOVERNOR;
+        } else {
+                gov = cpufreq_default_governor();
+                if (!gov)
+                        return -ENODATA;
+        }

         new_policy.governor = gov;

@@ -996,36 +952,45 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
         if (cpumask_test_cpu(cpu, policy->cpus))
                 return 0;

+        down_write(&policy->rwsem);
         if (has_target()) {
-                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                 if (ret) {
                         pr_err("%s: Failed to stop governor\n", __func__);
-                        return ret;
+                        goto unlock;
                 }
         }

-        down_write(&policy->rwsem);
         cpumask_set_cpu(cpu, policy->cpus);
-        up_write(&policy->rwsem);

         if (has_target()) {
-                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+                ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
                 if (!ret)
-                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+                        ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

-                if (ret) {
+                if (ret)
                         pr_err("%s: Failed to start governor\n", __func__);
-                        return ret;
-                }
         }

-        return 0;
+unlock:
+        up_write(&policy->rwsem);
+        return ret;
+}
+
+static void handle_update(struct work_struct *work)
+{
+        struct cpufreq_policy *policy =
+                container_of(work, struct cpufreq_policy, update);
+        unsigned int cpu = policy->cpu;
+
+        pr_debug("handle_update for cpu %u called\n", cpu);
+        cpufreq_update_policy(cpu);
 }

 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
         struct device *dev = get_cpu_device(cpu);
         struct cpufreq_policy *policy;
+        int ret;

         if (WARN_ON(!dev))
                 return NULL;
@@ -1043,7 +1008,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
         if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                 goto err_free_rcpumask;

-        kobject_init(&policy->kobj, &ktype_cpufreq);
+        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+                                   cpufreq_global_kobject, "policy%u", cpu);
+        if (ret) {
+                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+                goto err_free_real_cpus;
+        }
+
         INIT_LIST_HEAD(&policy->policy_list);
         init_rwsem(&policy->rwsem);
         spin_lock_init(&policy->transition_lock);
@@ -1054,6 +1025,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
         policy->cpu = cpu;
         return policy;

+err_free_real_cpus:
+        free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
         free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
@@ -1158,16 +1131,6 @@ static int cpufreq_online(unsigned int cpu)
                 cpumask_copy(policy->related_cpus, policy->cpus);
                 /* Remember CPUs present at the policy creation time. */
                 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
-
-                /* Name and add the kobject */
-                ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
-                                  "policy%u",
-                                  cpumask_first(policy->related_cpus));
-                if (ret) {
-                        pr_err("%s: failed to add policy->kobj: %d\n", __func__,
-                               ret);
-                        goto out_exit_policy;
-                }
         }

         /*
@@ -1309,9 +1272,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         return ret;
 }

-static void cpufreq_offline_prepare(unsigned int cpu)
+static void cpufreq_offline(unsigned int cpu)
 {
         struct cpufreq_policy *policy;
+        int ret;

         pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

@@ -1321,13 +1285,13 @@ static void cpufreq_offline_prepare(unsigned int cpu)
                 return;
         }

+        down_write(&policy->rwsem);
         if (has_target()) {
-                int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                 if (ret)
                         pr_err("%s: Failed to stop governor\n", __func__);
         }
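
The cpufreq.c hunks above widen the scope of policy->rwsem: cpufreq_add_policy_cpu() and cpufreq_offline() now hold the write lock across the whole stop-governor/update/start-governor sequence instead of only around the cpumask update, so readers can no longer observe a half-updated policy. A standalone pthreads sketch of that locking idea (hypothetical names, not the kernel API):

#include <pthread.h>
#include <stdio.h>

struct policy {
        pthread_rwlock_t rwsem;
        int cpus;               /* stand-in for policy->cpus */
        int gov_running;        /* stand-in for governor state */
};

static void add_policy_cpu(struct policy *p, int cpu_bit)
{
        /* One write-side critical section covers the full transition. */
        pthread_rwlock_wrlock(&p->rwsem);

        p->gov_running = 0;     /* CPUFREQ_GOV_STOP */
        p->cpus |= cpu_bit;     /* update the mask while stopped */
        p->gov_running = 1;     /* CPUFREQ_GOV_START + GOV_LIMITS */

        pthread_rwlock_unlock(&p->rwsem);
}

int main(void)
{
        struct policy p = {
                .rwsem = PTHREAD_RWLOCK_INITIALIZER,
                .cpus = 1,
                .gov_running = 1,
        };

        add_policy_cpu(&p, 2);
        printf("cpus mask: 0x%x, governor running: %d\n",
               p.cpus, p.gov_running);
        return 0;
}
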