author    Linus Torvalds <torvalds@linux-foundation.org>    2017-05-01 19:12:53 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-05-01 19:12:53 -0700
commit    3527d3e9514f013f361fba29fd71858d9361049d (patch)
tree      6c46190e29a05c66b6efdaa9ba7ab2453c4bb51e
parent    3711c94fd6593318146348c940d81040acf9e877 (diff)
parent    21173d0b4d2a0b9e9e5f3155cf2cfc5781a6f4b1 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - another round of rq-clock handling debugging, robustization and
     fixes

   - PELT accounting improvements

   - CPU hotplug related ->cpus_allowed affinity handling fixes all
     around the tree

   - ... plus misc fixes, cleanups and updates"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  sched/x86: Update reschedule warning text
  crypto: N2 - Replace racy task affinity logic
  cpufreq/sparc-us2e: Replace racy task affinity logic
  cpufreq/sparc-us3: Replace racy task affinity logic
  cpufreq/sh: Replace racy task affinity logic
  cpufreq/ia64: Replace racy task affinity logic
  ACPI/processor: Replace racy task affinity logic
  ACPI/processor: Fix error handling in __acpi_processor_start()
  sparc/sysfs: Replace racy task affinity logic
  powerpc/smp: Replace open coded task affinity logic
  ia64/sn/hwperf: Replace racy task affinity logic
  ia64/salinfo: Replace racy task affinity logic
  workqueue: Provide work_on_cpu_safe()
  ia64/topology: Remove cpus_allowed manipulation
  sched/fair: Move the PELT constants into a generated header
  sched/fair: Increase PELT accuracy for small tasks
  sched/fair: Fix comments
  sched/Documentation: Add 'sched-pelt' tool
  sched/fair: Fix corner case in __accumulate_sum()
  sched/core: Remove 'task' parameter and rename tsk_restore_flags() to current_restore_flags()
  ...
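The common thread in the affinity fixes listed above: open-coded save/pin/restore of
current->cpus_allowed is inherently racy, and the cure throughout this series is to
hand the function to the target CPU instead. A minimal sketch of the before/after
shape, assuming the work_on_cpu_safe() helper this series adds to kernel/workqueue.c
(the wrapper names here are illustrative, not from the patches):

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

/* Old, racy shape (removed all over the tree below): nothing pins the
 * CPU against hotplug, and the temporary mask is briefly visible to
 * anyone inspecting the task's affinity. */
static long call_on_cpu_racy(int cpu, long (*fn)(void *), void *arg)
{
        cpumask_t saved = current->cpus_allowed;
        long ret;

        set_cpus_allowed_ptr(current, cpumask_of(cpu));
        ret = fn(arg);                          /* runs on 'cpu'... probably */
        set_cpus_allowed_ptr(current, &saved);
        return ret;
}

/* New shape: queue fn on the target CPU's workqueue and wait for its
 * return value; the _safe variant also holds off CPU hotplug. */
static long call_on_cpu_fixed(int cpu, long (*fn)(void *), void *arg)
{
        return work_on_cpu_safe(cpu, fn, arg);
}
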
-rw-r--r--  Documentation/scheduler/sched-pelt.c    108
-rw-r--r--  arch/ia64/kernel/salinfo.c               31
-rw-r--r--  arch/ia64/kernel/topology.c               6
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c      17
-rw-r--r--  arch/powerpc/kernel/smp.c                26
-rw-r--r--  arch/sparc/kernel/sysfs.c                39
-rw-r--r--  arch/x86/kernel/smp.c                     2
-rw-r--r--  drivers/acpi/processor_driver.c          10
-rw-r--r--  drivers/acpi/processor_throttling.c      62
-rw-r--r--  drivers/block/nbd.c                       2
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c      92
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c             45
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c     45
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c      46
-rw-r--r--  drivers/crypto/n2_core.c                 31
-rw-r--r--  drivers/scsi/iscsi_tcp.c                  2
-rw-r--r--  fs/nfsd/vfs.c                             2
-rw-r--r--  include/linux/sched.h                     6
-rw-r--r--  include/linux/workqueue.h                 5
-rw-r--r--  kernel/sched/core.c                     201
-rw-r--r--  kernel/sched/fair.c                     418
-rw-r--r--  kernel/sched/features.h                   7
-rw-r--r--  kernel/sched/rt.c                        81
-rw-r--r--  kernel/sched/sched-pelt.h                13
-rw-r--r--  kernel/sched/sched.h                     65
-rw-r--r--  kernel/softirq.c                          2
-rw-r--r--  kernel/workqueue.c                       23
-rw-r--r--  net/core/dev.c                            2
-rw-r--r--  net/core/sock.c                           2
29 files changed, 847 insertions(+), 544 deletions(-)
diff --git a/Documentation/scheduler/sched-pelt.c b/Documentation/scheduler/sched-pelt.c
new file mode 100644
index 000000000000..e4219139386a
--- /dev/null
+++ b/Documentation/scheduler/sched-pelt.c
@@ -0,0 +1,108 @@
+/*
+ * The following program is used to generate the constants for
+ * computing sched averages.
+ *
+ * ==============================================================
+ * C program (compile with -lm)
+ * ==============================================================
+ */
+
+#include <math.h>
+#include <stdio.h>
+
+#define HALFLIFE 32
+#define SHIFT 32
+
+double y;
+
+void calc_runnable_avg_yN_inv(void)
+{
+ int i;
+ unsigned int x;
+
+ printf("static const u32 runnable_avg_yN_inv[] = {");
+ for (i = 0; i < HALFLIFE; i++) {
+ x = ((1UL<<32)-1)*pow(y, i);
+
+ if (i % 6 == 0) printf("\n\t");
+ printf("0x%8x, ", x);
+ }
+ printf("\n};\n\n");
+}
+
+int sum = 1024;
+
+void calc_runnable_avg_yN_sum(void)
+{
+ int i;
+
+ printf("static const u32 runnable_avg_yN_sum[] = {\n\t 0,");
+ for (i = 1; i <= HALFLIFE; i++) {
+ if (i == 1)
+ sum *= y;
+ else
+ sum = sum*y + 1024*y;
+
+ if (i % 11 == 0)
+ printf("\n\t");
+
+ printf("%5d,", sum);
+ }
+ printf("\n};\n\n");
+}
+
+int n = -1;
+/* first period */
+long max = 1024;
+
+void calc_converged_max(void)
+{
+ long last = 0, y_inv = ((1UL<<32)-1)*y;
+
+ for (; ; n++) {
+ if (n > -1)
+ max = ((max*y_inv)>>SHIFT) + 1024;
+ /*
+ * This is the same as:
+ * max = max*y + 1024;
+ */
+
+ if (last == max)
+ break;
+
+ last = max;
+ }
+ n--;
+ printf("#define LOAD_AVG_PERIOD %d\n", HALFLIFE);
+ printf("#define LOAD_AVG_MAX %ld\n", max);
+// printf("#define LOAD_AVG_MAX_N %d\n\n", n);
+}
+
+void calc_accumulated_sum_32(void)
+{
+ int i, x = sum;
+
+ printf("static const u32 __accumulated_sum_N32[] = {\n\t 0,");
+ for (i = 1; i <= n/HALFLIFE+1; i++) {
+ if (i > 1)
+ x = x/2 + sum;
+
+ if (i % 6 == 0)
+ printf("\n\t");
+
+ printf("%6d,", x);
+ }
+ printf("\n};\n\n");
+}
+
+int main(void)
+{
+ printf("/* Generated by Documentation/scheduler/sched-pelt; do not modify. */\n\n");
+
+ y = pow(0.5, 1/(double)HALFLIFE);
+
+ calc_runnable_avg_yN_inv();
+// calc_runnable_avg_yN_sum();
+ calc_converged_max();
+// calc_accumulated_sum_32();
+}
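
A quick sanity check of what this generator computes: y is chosen so that
y^HALFLIFE = 0.5, i.e. the load signal halves every 32 periods, and LOAD_AVG_MAX
is the fixed point of max = max*y + 1024, which in exact arithmetic is the
geometric series 1024/(1 - y). The standalone program below (illustrative, not
part of the patch; compile with -lm) confirms the ballpark; the generator's
32-bit fixed-point iteration settles slightly lower, at the 47742 emitted into
kernel/sched/sched-pelt.h:

#include <math.h>
#include <stdio.h>

int main(void)
{
        double y = pow(0.5, 1.0 / 32.0);        /* y^32 == 0.5 */

        /* Sum of 1024 * (1 + y + y^2 + ...) in exact arithmetic */
        printf("y          = %.6f\n", y);
        printf("1024/(1-y) = %.1f\n", 1024.0 / (1.0 - y));
        return 0;
}
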
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index d194d5c83d32..63dc9cdc95c5 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -179,14 +179,14 @@ struct salinfo_platform_oemdata_parms {
const u8 *efi_guid;
u8 **oemdata;
u64 *oemdata_size;
- int ret;
};
-static void
+static long
salinfo_platform_oemdata_cpu(void *context)
{
struct salinfo_platform_oemdata_parms *parms = context;
- parms->ret = salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
+
+ return salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
}
static void
@@ -380,16 +380,7 @@ salinfo_log_release(struct inode *inode, struct file *file)
return 0;
}
-static void
-call_on_cpu(int cpu, void (*fn)(void *), void *arg)
-{
- cpumask_t save_cpus_allowed = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
- (*fn)(arg);
- set_cpus_allowed_ptr(current, &save_cpus_allowed);
-}
-
-static void
+static long
salinfo_log_read_cpu(void *context)
{
struct salinfo_data *data = context;
@@ -399,6 +390,7 @@ salinfo_log_read_cpu(void *context)
/* Clear corrected errors as they are read from SAL */
if (rh->severity == sal_log_severity_corrected)
ia64_sal_clear_state_info(data->type);
+ return 0;
}
static void
@@ -430,7 +422,7 @@ retry:
spin_unlock_irqrestore(&data_saved_lock, flags);
if (!data->saved_num)
- call_on_cpu(cpu, salinfo_log_read_cpu, data);
+ work_on_cpu_safe(cpu, salinfo_log_read_cpu, data);
if (!data->log_size) {
data->state = STATE_NO_DATA;
cpumask_clear_cpu(cpu, &data->cpu_event);
@@ -459,11 +451,13 @@ salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *p
return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
}
-static void
+static long
salinfo_log_clear_cpu(void *context)
{
struct salinfo_data *data = context;
+
ia64_sal_clear_state_info(data->type);
+ return 0;
}
static int
@@ -486,7 +480,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
rh = (sal_log_record_header_t *)(data->log_buffer);
/* Corrected errors have already been cleared from SAL */
if (rh->severity != sal_log_severity_corrected)
- call_on_cpu(cpu, salinfo_log_clear_cpu, data);
+ work_on_cpu_safe(cpu, salinfo_log_clear_cpu, data);
/* clearing a record may make a new record visible */
salinfo_log_new_read(cpu, data);
if (data->state == STATE_LOG_RECORD) {
@@ -531,9 +525,8 @@ salinfo_log_write(struct file *file, const char __user *buffer, size_t count, lo
.oemdata = &data->oemdata,
.oemdata_size = &data->oemdata_size
};
- call_on_cpu(cpu, salinfo_platform_oemdata_cpu, &parms);
- if (parms.ret)
- count = parms.ret;
+ count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu,
+ &parms);
} else
data->oemdata_size = 0;
} else
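
Worth noting in the salinfo conversion above: the ret member of
salinfo_platform_oemdata_parms disappears because work_on_cpu_safe() hands the
work function's long return value straight back to the caller, so the usual
0-or-negative-errno convention propagates without a side channel. Condensed
from the hunks above:

static long salinfo_platform_oemdata_cpu(void *context)
{
        struct salinfo_platform_oemdata_parms *parms = context;

        /* 0 on success or -errno; returned by work_on_cpu_safe() */
        return salinfo_platform_oemdata(parms->efi_guid, parms->oemdata,
                                        parms->oemdata_size);
}

/* caller: */
count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu, &parms);
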
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 1a68f012a6dc..d76529cbff20 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -355,18 +355,12 @@ static int cache_add_dev(unsigned int cpu)
unsigned long i, j;
struct cache_info *this_object;
int retval = 0;
- cpumask_t oldmask;
if (all_cpu_cache_info[cpu].kobj.parent)
return 0;
- oldmask = current->cpus_allowed;
- retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
- if (unlikely(retval))
- return retval;
retval = cpu_cache_sysfs_init(cpu);
- set_cpus_allowed_ptr(current, &oldmask);
if (unlikely(retval < 0))
return retval;
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 52704f199dd6..55febd65911a 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -598,12 +598,17 @@ static void sn_hwperf_call_sal(void *info)
op_info->ret = r;
}
+static long sn_hwperf_call_sal_work(void *info)
+{
+ sn_hwperf_call_sal(info);
+ return 0;
+}
+
static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
{
u32 cpu;
u32 use_ipi;
int r = 0;
- cpumask_t save_allowed;
cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
@@ -629,13 +634,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
/* use an interprocessor interrupt to call SAL */
smp_call_function_single(cpu, sn_hwperf_call_sal,
op_info, 1);
- }
- else {
- /* migrate the task before calling SAL */
- save_allowed = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
- sn_hwperf_call_sal(op_info);
- set_cpus_allowed_ptr(current, &save_allowed);
+ } else {
+ /* Call on the target CPU */
+ work_on_cpu_safe(cpu, sn_hwperf_call_sal_work, op_info);
}
}
r = op_info->ret;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 46f89e66a273..d68ed1f004a3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -787,24 +787,21 @@ static struct sched_domain_topology_level powerpc_topology[] = {
{ NULL, },
};
-void __init smp_cpus_done(unsigned int max_cpus)
+static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
- cpumask_var_t old_mask;
+ smp_ops->setup_cpu(boot_cpuid);
+ return 0;
+}
- /* We want the setup_cpu() here to be called from CPU 0, but our
- * init thread may have been "borrowed" by another CPU in the meantime
- * se we pin us down to CPU 0 for a short while
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ /*
+ * We want the setup_cpu() here to be called on the boot CPU, but
+ * init might run on any CPU, so make sure it's invoked on the boot
+ * CPU.
*/
- alloc_cpumask_var(&old_mask, GFP_NOWAIT);
- cpumask_copy(old_mask, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
-
if (smp_ops && smp_ops->setup_cpu)
- smp_ops->setup_cpu(boot_cpuid);
-
- set_cpus_allowed_ptr(current, old_mask);
-
- free_cpumask_var(old_mask);
+ work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);
if (smp_ops && smp_ops->bringup_done)
smp_ops->bringup_done();
@@ -812,7 +809,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
dump_numa_cpu_topology();
set_sched_topology(powerpc_topology);
-
}
#ifdef CONFIG_HOTPLUG_CPU
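
A hedged note on one detail of the powerpc conversion above: the new work
function can be marked __init because work_on_cpu_safe() runs it synchronously,
so the call completes while .init.text is still mapped and the function is
discarded with the rest of the init sections afterwards:

/* Lives in .init.text: safe because the work_on_cpu_safe() call in
 * smp_cpus_done() waits for it to finish before init memory is freed. */
static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
        smp_ops->setup_cpu(boot_cpuid);
        return 0;
}
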
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index d63fc613e7a9..5fd352b759af 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -98,27 +98,7 @@ static struct attribute_group mmu_stat_group = {
.name = "mmu_stats",
};
-/* XXX convert to rusty's on_one_cpu */
-static unsigned long run_on_cpu(unsigned long cpu,
- unsigned long (*func)(unsigned long),
- unsigned long arg)
-{
- cpumask_t old_affinity;
- unsigned long ret;
-
- cpumask_copy(&old_affinity, &current->cpus_allowed);
- /* should return -EINVAL to userspace */
- if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
- return 0;
-
- ret = func(arg);
-
- set_cpus_allowed_ptr(current, &old_affinity);
-
- return ret;
-}
-
-static unsigned long read_mmustat_enable(unsigned long junk)
+static long read_mmustat_enable(void *data __maybe_unused)
{
unsigned long ra = 0;
@@ -127,11 +107,11 @@ static unsigned long read_mmustat_enable(unsigned long junk)
return ra != 0;
}
-static unsigned long write_mmustat_enable(unsigned long val)
+static long write_mmustat_enable(void *data)
{
- unsigned long ra, orig_ra;
+ unsigned long ra, orig_ra, *val = data;
- if (val)
+ if (*val)
ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
else
ra = 0UL;
@@ -142,7 +122,8 @@ static unsigned long write_mmustat_enable(unsigned long val)
static ssize_t show_mmustat_enable(struct device *s,
struct device_attribute *attr, char *buf)
{
- unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
+ long val = work_on_cpu(s->id, read_mmustat_enable, NULL);
+
return sprintf(buf, "%lx\n", val);
}
@@ -150,13 +131,15 @@ static ssize_t store_mmustat_enable(struct device *s,
struct device_attribute *attr, const char *buf,
size_t count)
{
- unsigned long val, err;
- int ret = sscanf(buf, "%lu", &val);
+ unsigned long val;
+ long err;
+ int ret;
+ ret = sscanf(buf, "%lu", &val);
if (ret != 1)
return -EINVAL;
- err = run_on_cpu(s->id, write_mmustat_enable, val);
+ err = work_on_cpu(s->id, write_mmustat_enable, &val);
if (err)
return -EIO;
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d3c66a15bbde..3cab8415389a 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -124,7 +124,7 @@ static bool smp_no_nmi_ipi = false;
static void native_smp_send_reschedule(int cpu)
{
if (unlikely(cpu_is_offline(cpu))) {
- WARN_ON(1);
+ WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
return;
}
apic->send_IPI(cpu, RESCHEDULE_VECTOR);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 9d5f0c7ed3f7..8697a82bd465 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -251,6 +251,9 @@ static int __acpi_processor_start(struct acpi_device *device)
if (ACPI_SUCCESS(status))
return 0;
+ result = -ENODEV;
+ acpi_pss_perf_exit(pr, device);
+
err_power_exit:
acpi_processor_power_exit(pr);
return result;
@@ -259,11 +262,16 @@ err_power_exit:
static int acpi_processor_start(struct device *dev)
{
struct acpi_device *device = ACPI_COMPANION(dev);
+ int ret;
if (!device)
return -ENODEV;
- return __acpi_processor_start(device);
+ /* Protect against concurrent CPU hotplug operations */
+ get_online_cpus();
+ ret = __acpi_processor_start(device);
+ put_online_cpus();
+ return ret;
}
static int acpi_processor_stop(struct device *dev)
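
The get_online_cpus()/put_online_cpus() pair added above is the standard
read-side guard against CPU hotplug: the set of online CPUs cannot change
while it is held, which __acpi_processor_start() relies on. A sketch of the
shape (in later kernels the pair was renamed cpus_read_lock()/cpus_read_unlock()):

#include <linux/cpu.h>

static int start_with_hotplug_held(struct acpi_device *device)
{
        int ret;

        get_online_cpus();      /* no CPU may come or go in this section */
        ret = __acpi_processor_start(device);
        put_online_cpus();

        return ret;
}
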
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a12f96cc93ff..3de34633f7f9 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
#define THROTTLING_POSTCHANGE (2)
static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr,
- int state, bool force);
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force, bool direct);
static int acpi_processor_update_tsd_coord(void)
{
@@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Invalid throttling state, reset\n"));
state = 0;
- ret = acpi_processor_set_throttling(pr, state, true);
+ ret = __acpi_processor_set_throttling(pr, state, true,
+ true);
if (ret)
return ret;
}
@@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
return 0;
}
-static int acpi_processor_get_throttling(struct acpi_processor *pr)
+static long __acpi_processor_get_throttling(void *data)
{
- cpumask_var_t saved_mask;
- int ret;
+ struct acpi_processor *pr = data;
+
+ return pr->throttling.acpi_processor_get_throttling(pr);
+}
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
/*
- * Migrate task to the cpu pointed by pr.
+ * This is either called from the CPU hotplug callback of
+ * processor_driver or via the ACPI probe function. In the latter
+ * case the CPU is not guaranteed to be online. Both call sites are
+ * protected against CPU hotplug.
*/
- cpumask_copy(saved_mask, &current->cpus_allowed);
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the target pr->id CPU. Exit */
- free_cpumask_var(saved_mask);
+ if (!cpu_online(pr->id))
return -ENODEV;
- }
- ret = pr->throttling.acpi_processor_get_throttling(pr);
- /* restore the previous state */
- set_cpus_allowed_ptr(current, saved_mask);
- free_cpumask_var(saved_mask);
- return ret;
+ return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
}
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data)
arg->target_state, arg->force);
}
-int acpi_processor_set_throttling(struct acpi_processor *pr,
- int state, bool force)
+static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+{
+ if (direct)
+ return fn(arg);
+ return work_on_cpu(cpu, fn, arg);
+}
+
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force, bool direct)
{
int ret = 0;
unsigned int i;
@@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
arg.pr = pr;
arg.target_state = state;
arg.force = force;
- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
+ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
+ direct);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
@@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
arg.pr = match_pr;
arg.target_state = state;
arg.force = force;
- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
- &arg);
+ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg, direct);
}
}
/*
@@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return ret;
}
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
+ bool force)
+{
+ return __acpi_processor_set_throttling(pr, state, force, false);
+}
+
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
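
The direct flag threaded through the throttling code above exists because
__acpi_processor_set_throttling() now has two kinds of callers: the reset path
inside acpi_processor_get_throttling_ptc() already executes on pr->id via
work_on_cpu(), where queueing and flushing another work item is redundant and
risks deadlocking on the flush, while ordinary callers still need to be bounced
to the right CPU. The resulting split, condensed:

/* Already pinned to pr->id (running inside a work_on_cpu() callback):
 * invoke the throttling function directly. */
ret = __acpi_processor_set_throttling(pr, state, true, true);

/* Arbitrary context: route through work_on_cpu() as before. */
ret = acpi_processor_set_throttling(pr, state, force);
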
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ac376b9b852d..56efb0444b4d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -381,7 +381,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
*sent += result;
} while (msg_data_left(&msg));
- tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ current_restore_flags(pflags, PF_MEMALLOC);
return result;
}
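
The tsk_restore_flags() -> current_restore_flags() rename drops the task
parameter because every caller passed current; the semantics are unchanged:
only the named flag bits are restored to their saved values, all other flags
are left alone. The surrounding nbd pattern looks roughly like this (a
condensed sketch, not the full sock_xmit(); the wrapper name is hypothetical):

static int xmit_with_reserves(struct socket *sock, struct msghdr *msg)
{
        unsigned long pflags = current->flags;
        int result;

        current->flags |= PF_MEMALLOC;  /* may dip into memory reserves */
        result = sock_sendmsg(sock, msg);

        /* Restore PF_MEMALLOC to its saved state; other flags untouched */
        current_restore_flags(pflags, PF_MEMALLOC);
        return result;
}
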
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index e28a31a40829..a757c0a1e7b5 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -34,6 +34,11 @@ struct cpufreq_acpi_io {
unsigned int resume;
};
+struct cpufreq_acpi_req {
+ unsigned int cpu;
+ unsigned int state;
+};
+
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -83,8 +88,7 @@ processor_get_pstate (
static unsigned
extract_clock (
struct cpufreq_acpi_io *data,
- unsigned value,
- unsigned int cpu)
+ unsigned value)
{
unsigned long i;
@@ -98,60 +102,43 @@ extract_clock (
}
-static unsigned int
+static long
processor_get_freq (
- struct cpufreq_acpi_io *data,
- unsigned int cpu)
+ void *arg)
{
- int ret = 0;
- u32 value = 0;
- cpumask_t saved_mask;
- unsigned long clock_freq;
+ struct cpufreq_acpi_req *req = arg;
+ unsigned int cpu = req->cpu;
+ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+ u32 value;
+ int ret;
pr_debug("processor_get_freq\n");
-
- saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (smp_processor_id() != cpu)
- goto migrate_end;
+ return -EAGAIN;
/* processor_get_pstate gets the instantaneous frequency */
ret = processor_get_pstate(&value);
-
if (ret) {
- set_cpus_allowed_ptr(current, &saved_mask);
pr_warn("get performance failed with error %d\n", ret);
- ret = 0;
- goto migrate_end;
+ return ret;
}
- clock_freq = extract_clock(data, value, cpu);
- ret = (clock_freq*1000);
-
-migrate_end:
- set_cpus_allowed_ptr(current, &saved_mask);
- return ret;
+ return 1000 * extract_clock(data, value);
}
-static int
+static long
processor_set_freq (
- struct cpufreq_acpi_io *data,
- struct cpufreq_policy *policy,
- int state)
+ void *arg)
{
- int ret = 0;
- u32 value = 0;
- cpumask_t saved_mask;
- int retval;
+ struct cpufreq_acpi_req *req = arg;
+ unsigned int cpu = req->cpu;
+ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+ int ret, state = req->state;
+ u32 value;
pr_debug("processor_set_freq\n");
-
- saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
- if (smp_processor_id() != policy->cpu) {
- retval = -EAGAIN;
- goto migrate_end;
- }
+ if (smp_processor_id() != cpu)
+ return -EAGAIN;
if (state == data->acpi_data.state) {
if (unlikely(data->resume)) {
@@ -159,8 +146,7 @@ processor_set_freq (
data->resume = 0;
} else {
pr_debug("Already at target state (P%d)\n", state);
- retval = 0;
- goto migrate_end;
+ return 0;
}
}
@@ -171,7 +157,6 @@ processor_set_freq (
* First we write the target state's 'control' value to the
* control_register.
*/
-
value = (u32) data->acpi_data.states[state].control;
pr_debug("Transitioning to state: 0x%08x\n", value);
@@ -179,17 +164,11 @@ processor_set_freq (
ret = processor_set_pstate(value);
if (ret) {
pr_warn("Transition failed with error %d\n", ret);
- retval = -ENODEV;
- goto migrate_end;
+ return -ENODEV;
}
data->acpi_data.state = state;
-
- retval = 0;
-
-migrate_end:
- set_cpus_allowed_ptr(current, &saved_mask);
- return (retval);
+ return 0;
}
@@ -197,11 +176,13 @@ static unsigned int
acpi_cpufreq_get (
unsigned int cpu)
{
- struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+ struct cpufreq_acpi_req req;
+ long ret;
- pr_debug("acpi_cpufreq_get\n");
+ req.cpu = cpu;
+ ret = work_on_cpu(cpu, processor_get_freq, &req);
- return processor_get_freq(data, cpu);
+ return ret > 0 ? (unsigned int) ret : 0;
}
@@ -210,7 +191,12 @@ acpi_cpufreq_target (
struct cpufreq_policy *policy,
unsigned int index)
{
- return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
+ struct cpufreq_acpi_req req;
+
+ req.cpu = policy->cpu;
+ req.state = index;
+
+ return work_on_cpu(req.cpu, processor_set_freq, &req);
}
static int
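
The ia64 conversion above also shows the idiom for passing more than one
argument through work_on_cpu()'s single void * slot: bundle them in a small
on-stack request struct, which is safe because work_on_cpu() waits for the
work to complete before returning. Condensed:

struct cpufreq_acpi_req {
        unsigned int cpu;
        unsigned int state;
};

static unsigned int acpi_cpufreq_get(unsigned int cpu)
{
        struct cpufreq_acpi_req req = { .cpu = cpu };
        long ret = work_on_cpu(cpu, processor_get_freq, &req);

        /* negative: error from the work function; positive: frequency */
        return ret > 0 ? (unsigned int)ret : 0;
}
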
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 86628e22b2a3..719c3d9f07fb 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -30,54 +30,63 @@
static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+struct cpufreq_target {
+ struct cpufreq_policy *policy;
+ unsigned int freq;
+};
+
static unsigned int sh_cpufreq_get(unsigned int cpu)
{
return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
}
-/*
- * Here we notify other drivers of the proposed change and the final change.
- */
-static int sh_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static long __sh_cpufreq_target(void *arg)
{
- unsigned int cpu = policy->cpu;
+ struct cpufreq_target *target = arg;
+ struct cpufreq_policy *policy = target->policy;
+ int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
- cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
struct device *dev;
long freq;
- cpus_allowed = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
-
- BUG_ON(smp_processor_id() != cpu);
+ if (smp_processor_id() != cpu)
+ return -ENODEV;
dev = get_cpu_device(cpu);
/* Convert target_freq from kHz to Hz */
- freq = clk_round_rate(cpuclk, target_freq * 1000);
+ freq = clk_round_rate(cpuclk, target->freq * 1000);
if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
return -EINVAL;
- dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
+ dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
freqs.old = sh_cpufreq_get(cpu);
freqs.new = (freq + 500) / 1000;
freqs.flags = 0;
- cpufreq_freq_transition_begin(policy, &freqs);
- set_cpus_allowed_ptr(current, &cpus_allowed);
+ cpufreq_freq_transition_begin(target->policy, &freqs);
clk_set_rate(cpuclk, freq);
- cpufreq_freq_transition_end(policy, &freqs, 0);
+ cpufreq_freq_transition_end(target->policy, &freqs, 0);
dev_dbg(dev, "set frequency %lu Hz\n", freq);
-
return 0;
}
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int sh_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_target data = { .policy = policy, .freq = target_freq };
+
+ return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
+}
+
static int sh_cpufreq_verify(struct cpufreq_policy *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
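
Also visible in the sh conversion: the hard BUG_ON(smp_processor_id() != cpu)
becomes a graceful failure. With work_on_cpu() a mismatch should be impossible,
so the check is purely defensive, and returning an error beats crashing. The
guard, as a reusable shape (condensed from the hunk above):

static long __sh_cpufreq_target(void *arg)
{
        struct cpufreq_target *target = arg;
        int cpu = target->policy->cpu;

        /* Defensive: we were queued on 'cpu', so this should never fire,
         * but fail with an error rather than BUG() if it does. */
        if (smp_processor_id() != cpu)
                return -ENODEV;
        /* ... perform the frequency transition on this CPU ... */
        return 0;
}
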
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 35ddb6da93aa..90f33efee5fc 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -118,10 +118,6 @@ static void us2e_transition(unsigned long estar, unsigned long new_bits,
unsigned long clock_tick,
unsigned long old_divisor, unsigned long divisor)
{
- unsigned long flags;
-
- local_irq_save(flags);
-
estar &= ~ESTAR_MODE_DIV_MASK;
/* This is based upon the state transition diagram in the IIe manual. */
@@ -152,8 +148,6 @@ static void us2e_transition(unsigned long estar, unsigned long new_bits,
} else {
BUG();
}
-
- local_irq_restore(flags);
}
static unsigned long index_to_estar_mode(unsigned int index)
@@ -229,48 +223,51 @@ static unsigned long estar_to_divisor(unsigned long estar)
return ret;
}
+static void __us2e_freq_get(void *arg)
+{
+ unsigned long *estar = arg;
+
+ *estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+}
+
static unsigned int us2e_freq_get(unsigned int cpu)
{
- cpumask_t cpus_allowed;
unsigned long clock_tick, estar;
- cpumask_copy(&cpus_allowed, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
-
clock_tick = sparc64_get_clock_tick(cpu) / 1000;
- estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
-
- set_cpus_allowed_ptr(current, &cpus_allowed);
+ if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
+ return 0;
return clock_tick / estar_to_divisor(estar);
}